source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
TBBHashBackend.h | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#pragma once
#include <tbb/concurrent_unordered_map.h>
#include <limits>
#include <unordered_map>
#include "open3d/core/hashmap/CPU/CPUHashBackendBufferAccessor.hpp"
#include "open3d/core/hashmap/DeviceHashBackend.h"
#include "open3d/utility/Parallel.h"
namespace open3d {
namespace core {
// CPU hash backend built on tbb::concurrent_unordered_map.
// The map stores Key -> buf_index_t; each index refers to a slot in the
// shared HashBackendBuffer that holds the raw key/value payload bytes.
template <typename Key, typename Hash>
class TBBHashBackend : public DeviceHashBackend {
public:
    TBBHashBackend(int64_t init_capacity,
                   int64_t key_dsize,
                   const std::vector<int64_t>& value_dsizes,
                   const Device& device);
    ~TBBHashBackend();

    // Rebuild the table with the given bucket count, re-inserting all
    // currently active entries into a freshly allocated buffer.
    void Rehash(int64_t buckets) override;

    // Insert keys with SoA value arrays. For each i, output_masks[i] is true
    // iff the key was newly inserted; output_buf_indices[i] holds its buffer
    // slot on success and 0 otherwise.
    void Insert(const void* input_keys,
                const std::vector<const void*>& input_values_soa,
                buf_index_t* output_buf_indices,
                bool* output_masks,
                int64_t count) override;

    // Insert keys only; value slots are zero-initialized.
    void Activate(const void* input_keys,
                  buf_index_t* output_buf_indices,
                  bool* output_masks,
                  int64_t count) override;

    // Look up keys; masks flag hits, indices give the buffer slots (0 on miss).
    void Find(const void* input_keys,
              buf_index_t* output_buf_indices,
              bool* output_masks,
              int64_t count) override;

    // Remove keys and free their buffer slots (sequential: uses unsafe_erase).
    void Erase(const void* input_keys,
               bool* output_masks,
               int64_t count) override;

    // Write all active buffer indices into output_indices; returns the count.
    int64_t GetActiveIndices(buf_index_t* output_indices) override;

    void Clear() override;

    int64_t Size() const override;
    int64_t GetBucketCount() const override;
    std::vector<int64_t> BucketSizes() const override;
    float LoadFactor() const override;

    // Direct access to the underlying TBB map.
    std::shared_ptr<tbb::concurrent_unordered_map<Key, buf_index_t, Hash>>
    GetImpl() const {
        return impl_;
    }

protected:
    std::shared_ptr<tbb::concurrent_unordered_map<Key, buf_index_t, Hash>>
            impl_;

    // Accessor over buffer_ used to allocate/free slots and reach payloads.
    std::shared_ptr<CPUHashBackendBufferAccessor> buffer_accessor_;

    // Core insertion; assumes capacity was already ensured by Insert().
    void InsertImpl(const void* input_keys,
                    const std::vector<const void*>& input_values_soa,
                    buf_index_t* output_buf_indices,
                    bool* output_masks,
                    int64_t count);

    // (Re)create buffer_, buffer_accessor_ and impl_ for `capacity` entries.
    void Allocate(int64_t capacity);
};
template <typename Key, typename Hash>
TBBHashBackend<Key, Hash>::TBBHashBackend(
        int64_t init_capacity,
        int64_t key_dsize,
        const std::vector<int64_t>& value_dsizes,
        const Device& device)
    : DeviceHashBackend(init_capacity, key_dsize, value_dsizes, device) {
    // Allocate the backing buffer and the TBB map up front.
    Allocate(init_capacity);
}

template <typename Key, typename Hash>
TBBHashBackend<Key, Hash>::~TBBHashBackend() {}

// Number of active entries currently stored in the map.
template <typename Key, typename Hash>
int64_t TBBHashBackend<Key, Hash>::Size() const {
    return impl_->size();
}
// Insert `count` keys with their SoA value arrays, growing the table first
// if the worst case (all keys new) would exceed the current capacity.
template <typename Key, typename Hash>
void TBBHashBackend<Key, Hash>::Insert(
        const void* input_keys,
        const std::vector<const void*>& input_values_soa,
        buf_index_t* output_buf_indices,
        bool* output_masks,
        int64_t count) {
    int64_t new_size = Size() + count;
    if (new_size > this->capacity_) {
        int64_t bucket_count = GetBucketCount();
        // Preserve the current average entries-per-bucket ratio when sizing
        // the new table; grow at least 2x to amortize rehash cost.
        float avg_capacity_per_bucket =
                float(this->capacity_) / float(bucket_count);

        int64_t expected_buckets = std::max(
                bucket_count * 2,
                int64_t(std::ceil(new_size / avg_capacity_per_bucket)));
        Rehash(expected_buckets);
    }
    InsertImpl(input_keys, input_values_soa, output_buf_indices, output_masks,
               count);
}
// Activate inserts keys without user-supplied values: delegating to Insert()
// with an empty SoA value list makes InsertImpl zero-fill the value slots.
template <typename Key, typename Hash>
void TBBHashBackend<Key, Hash>::Activate(const void* input_keys,
                                         buf_index_t* output_buf_indices,
                                         bool* output_masks,
                                         int64_t count) {
    const std::vector<const void*> empty_values;
    Insert(input_keys, empty_values, output_buf_indices, output_masks, count);
}
// Parallel point lookup: each iteration writes exactly one mask/index pair,
// so the OpenMP loop has no shared mutable state.
template <typename Key, typename Hash>
void TBBHashBackend<Key, Hash>::Find(const void* input_keys,
                                     buf_index_t* output_buf_indices,
                                     bool* output_masks,
                                     int64_t count) {
    const Key* input_keys_templated = static_cast<const Key*>(input_keys);

#pragma omp parallel for num_threads(utility::EstimateMaxThreads())
    for (int64_t i = 0; i < count; ++i) {
        const Key& key = input_keys_templated[i];

        auto iter = impl_->find(key);
        bool flag = (iter != impl_->end());
        output_masks[i] = flag;
        // 0 is the dummy index reported for misses.
        output_buf_indices[i] = flag ? iter->second : 0;
    }
}
// Sequential erase: tbb::concurrent_unordered_map only provides
// unsafe_erase, which is not safe to call concurrently, hence no OpenMP.
template <typename Key, typename Hash>
void TBBHashBackend<Key, Hash>::Erase(const void* input_keys,
                                      bool* output_masks,
                                      int64_t count) {
    const Key* input_keys_templated = static_cast<const Key*>(input_keys);

    for (int64_t i = 0; i < count; ++i) {
        const Key& key = input_keys_templated[i];

        auto iter = impl_->find(key);
        bool flag = (iter != impl_->end());
        output_masks[i] = flag;
        if (flag) {
            // Release the payload slot before removing the map entry.
            buffer_accessor_->DeviceFree(iter->second);
            impl_->unsafe_erase(iter);
        }
    }
}
// Write the buffer indices of all active entries into output_buf_indices.
// Returns the number of indices written (== Size()).
// NOTE(review): sequential traversal; assumes no concurrent erase — confirm.
template <typename Key, typename Hash>
int64_t TBBHashBackend<Key, Hash>::GetActiveIndices(
        buf_index_t* output_buf_indices) {
    int64_t count = impl_->size();
    int64_t i = 0;
    for (auto iter = impl_->begin(); iter != impl_->end(); ++iter, ++i) {
        // iter->second is already a buf_index_t; the previous
        // static_cast<int64_t> only widened it to be silently narrowed back
        // on assignment, so assign directly.
        output_buf_indices[i] = iter->second;
    }

    return count;
}
// Remove all entries and reset the slot heap; capacity is unchanged.
template <typename Key, typename Hash>
void TBBHashBackend<Key, Hash>::Clear() {
    impl_->clear();
    this->buffer_->ResetHeap();
}
// Rebuild the table with (at least) the requested bucket count:
// 1) snapshot all active keys/values out of the current buffer,
// 2) re-allocate buffer + map with a capacity scaled from the current
//    average load per bucket,
// 3) re-insert the snapshot and rehash the underlying map.
template <typename Key, typename Hash>
void TBBHashBackend<Key, Hash>::Rehash(int64_t buckets) {
    int64_t count = Size();

    Tensor active_keys;
    std::vector<Tensor> active_values;

    if (count > 0) {
        // Gather the indices of live slots, then pull their keys/values.
        Tensor active_buf_indices({count}, core::Int32, this->device_);
        GetActiveIndices(
                static_cast<buf_index_t*>(active_buf_indices.GetDataPtr()));

        Tensor active_indices = active_buf_indices.To(core::Int64);
        active_keys = this->GetKeyBuffer().IndexGet({active_indices});
        auto value_buffers = this->GetValueBuffers();
        for (auto& value_buffer : value_buffers) {
            active_values.emplace_back(value_buffer.IndexGet({active_indices}));
        }
    }

    // Preserve the current entries-per-bucket ratio at the new bucket count.
    float avg_capacity_per_bucket =
            float(this->capacity_) / float(GetBucketCount());

    int64_t new_capacity =
            int64_t(std::ceil(buckets * avg_capacity_per_bucket));
    // Replaces buffer_, buffer_accessor_ and impl_ (old contents discarded).
    Allocate(new_capacity);

    if (count > 0) {
        Tensor output_buf_indices({count}, core::Int32, this->device_);
        Tensor output_masks({count}, core::Bool, this->device_);

        std::vector<const void*> active_value_ptrs;
        for (auto& active_value : active_values) {
            active_value_ptrs.push_back(active_value.GetDataPtr());
        }

        InsertImpl(active_keys.GetDataPtr(), active_value_ptrs,
                   static_cast<buf_index_t*>(output_buf_indices.GetDataPtr()),
                   output_masks.GetDataPtr<bool>(), count);
    }

    impl_->rehash(buckets);
}
// Bucket count of the underlying TBB map.
template <typename Key, typename Hash>
int64_t TBBHashBackend<Key, Hash>::GetBucketCount() const {
    return impl_->unsafe_bucket_count();
}

// Per-bucket entry counts; useful for inspecting the hash distribution.
template <typename Key, typename Hash>
std::vector<int64_t> TBBHashBackend<Key, Hash>::BucketSizes() const {
    int64_t bucket_count = impl_->unsafe_bucket_count();
    std::vector<int64_t> ret;
    // Reserve up front to avoid repeated reallocation while appending.
    ret.reserve(bucket_count);
    for (int64_t i = 0; i < bucket_count; ++i) {
        ret.push_back(impl_->unsafe_bucket_size(i));
    }
    return ret;
}

// Entries divided by buckets, as reported by the underlying TBB map.
template <typename Key, typename Hash>
float TBBHashBackend<Key, Hash>::LoadFactor() const {
    return impl_->load_factor();
}
// Insert `count` keys with optional SoA values. For keys already present
// (or inserted first by another thread in this batch), the mask stays false
// and the reported index stays 0; only the winning thread allocates a slot
// and copies the payload.
template <typename Key, typename Hash>
void TBBHashBackend<Key, Hash>::InsertImpl(
        const void* input_keys,
        const std::vector<const void*>& input_values_soa,
        buf_index_t* output_buf_indices,
        bool* output_masks,
        int64_t count) {
    const Key* input_keys_templated = static_cast<const Key*>(input_keys);

    size_t n_values = value_dsizes_.size();
    // Values are copied only when exactly one pointer per value buffer is
    // provided; otherwise slots are zero-filled (the Activate path).
    bool assign = (input_values_soa.size() == n_values);
    if (input_values_soa.size() != n_values && input_values_soa.size() != 0) {
        utility::LogWarning(
                "Input values mismatch with actual stored values, fall back to "
                "activate/reset instead of insertion.");
    }

#pragma omp parallel for num_threads(utility::EstimateMaxThreads())
    for (int64_t i = 0; i < count; ++i) {
        output_buf_indices[i] = 0;
        output_masks[i] = false;

        const Key& key = input_keys_templated[i];

        // Try to insert a dummy buffer index.
        auto res = impl_->insert({key, 0});

        // Lazy copy key value pair to buffer only if succeeded
        if (res.second) {
            buf_index_t buf_index = buffer_accessor_->DeviceAllocate();
            void* key_ptr = buffer_accessor_->GetKeyPtr(buf_index);

            // Copy templated key to buffer
            *static_cast<Key*>(key_ptr) = key;

            // Copy/reset non-templated value in buffer
            for (size_t j = 0; j < n_values; ++j) {
                uint8_t* dst_value = static_cast<uint8_t*>(
                        buffer_accessor_->GetValuePtr(buf_index, j));
                if (assign) {
                    const uint8_t* src_value =
                            static_cast<const uint8_t*>(input_values_soa[j]) +
                            this->value_dsizes_[j] * i;
                    std::memcpy(dst_value, src_value, this->value_dsizes_[j]);
                } else {
                    std::memset(dst_value, 0, this->value_dsizes_[j]);
                }
            }

            // Update from dummy 0
            res.first->second = buf_index;

            // Write to return variables
            output_buf_indices[i] = buf_index;
            output_masks[i] = true;
        }
    }
}
// (Re)create the backing buffer, its accessor, and the TBB map sized for
// `capacity` entries. Any previous contents are discarded.
template <typename Key, typename Hash>
void TBBHashBackend<Key, Hash>::Allocate(int64_t capacity) {
    this->capacity_ = capacity;

    this->buffer_ = std::make_shared<HashBackendBuffer>(
            this->capacity_, this->key_dsize_, this->value_dsizes_,
            this->device_);
    buffer_accessor_ =
            std::make_shared<CPUHashBackendBufferAccessor>(*this->buffer_);

    impl_ = std::make_shared<
            tbb::concurrent_unordered_map<Key, buf_index_t, Hash>>(capacity,
                                                                   Hash());
}
} // namespace core
} // namespace open3d
|
GB_unaryop__minv_int8_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int8_uint16
// op(A') function: GB_tran__minv_int8_uint16
// C type: int8_t
// A type: uint16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_SIGNED ((int8_t) Ax [p], 8) for all anz entries:
// cast each uint16_t entry to int8_t, then apply the signed integer
// multiplicative-inverse operator. The loop body is expanded from the
// GB_* macros defined above.
GrB_Info GB_unop__minv_int8_uint16
(
    int8_t *restrict Cx,        // output array
    const uint16_t *restrict Ax,// input array
    int64_t anz,                // number of entries
    int nthreads                // OpenMP thread count
)
{
    #if GB_DISABLE
    // This operator/type combination was compiled out via GB_control.h.
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): transpose A, typecast uint16_t -> int8_t, and apply
// the unary operator. The kernel body is generated by including
// GB_unaryop_transpose.c with the GB_* macros defined above.
GrB_Info GB_tran__minv_int8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // This operator/type combination was compiled out via GB_control.h.
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bml_copy_ellpack_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_copy.h"
#include "../bml_types.h"
#include "bml_allocate_ellpack.h"
#include "bml_copy_ellpack.h"
#include "bml_types_ellpack.h"
#include <complex.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
/** Copy an ellpack matrix - result is a new matrix.
*
* \ingroup copy_group
*
* \param A The matrix to be copied
* \return A copy of matrix A.
*/
bml_matrix_ellpack_t *TYPED_FUNC(
    bml_copy_ellpack_new) (
    bml_matrix_ellpack_t * A)
{
    bml_matrix_dimension_t matrix_dimension = { A->N, A->N, A->M };
    /* Fresh uninitialized matrix with A's shape and distribution mode. */
    bml_matrix_ellpack_t *B =
        TYPED_FUNC(bml_noinit_matrix_ellpack) (matrix_dimension,
                                               A->distribution_mode);

    int N = A->N;
    int M = A->M;
    int *A_index = A->index;
    int *A_nnz = A->nnz;
    REAL_T *A_value = A->value;
    int *B_index = B->index;
    int *B_nnz = B->nnz;
    REAL_T *B_value = B->value;

#ifdef USE_OMP_OFFLOAD
    /* Device-side copy: per-row nnz first, then index/value arrays. */
    {
#pragma omp target teams distribute parallel for
        for (int i = 0; i < N; i++)
        {
            B_nnz[i] = A_nnz[i];
        }
#pragma omp target teams distribute parallel for collapse(2) schedule (static, 1)
        for (int i = 0; i < N; i++)
        {
            for (int j = 0; j < M; j++)
            {
                B_index[ROWMAJOR(i, j, N, M)] = A_index[ROWMAJOR(i, j, N, M)];
                B_value[ROWMAJOR(i, j, N, M)] = A_value[ROWMAJOR(i, j, N, M)];
            }
        }
    }
#else
    /* Host-side copy: nnz in one memcpy, then one row per OpenMP iteration. */
    // memcpy(B->index, A->index, sizeof(int) * A->N * A->M);
    memcpy(B->nnz, A->nnz, sizeof(int) * A->N);
    // memcpy(B->value, A->value, sizeof(REAL_T) * A->N * A->M);
#pragma omp parallel for
    for (int i = 0; i < N; i++)
    {
        memcpy(&B_index[ROWMAJOR(i, 0, N, M)], &A_index[ROWMAJOR(i, 0, N, M)],
               M * sizeof(int));
        memcpy(&B_value[ROWMAJOR(i, 0, N, M)], &A_value[ROWMAJOR(i, 0, N, M)],
               M * sizeof(REAL_T));
        // A_nnz[perm[i]] = B_nnz[i];
    }
#endif
    bml_copy_domain(A->domain, B->domain);
    bml_copy_domain(A->domain2, B->domain2);

    return B;
}
/** Copy an ellpack matrix.
*
* \ingroup copy_group
*
* \param A The matrix to be copied
* \param B Copy of matrix A
*/
void TYPED_FUNC(
    bml_copy_ellpack) (
    bml_matrix_ellpack_t * A,
    bml_matrix_ellpack_t * B)
{
    assert(A->M > 0);

    int N = A->N;
    int M = A->M;
    int *A_index = A->index;
    int *A_nnz = A->nnz;
    REAL_T *A_value = A->value;
    int *B_index = B->index;
    int *B_nnz = B->nnz;
    REAL_T *B_value = B->value;

#ifdef USE_OMP_OFFLOAD
    /* Device-side copy: per-row nnz first, then index/value arrays. */
#pragma omp target teams distribute parallel for
    for (int i = 0; i < N; i++)
    {
        B_nnz[i] = A_nnz[i];
    }
#pragma omp target teams distribute parallel for collapse(2) schedule (static, 1)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < M; j++)
        {
            B_index[ROWMAJOR(i, j, N, M)] = A_index[ROWMAJOR(i, j, N, M)];
            B_value[ROWMAJOR(i, j, N, M)] = A_value[ROWMAJOR(i, j, N, M)];
        }
    }
#else
    /* Host-side copy: nnz in one memcpy, then one row per OpenMP iteration. */
    // memcpy(B->index, A->index, sizeof(int) * A->N * A->M);
    memcpy(B->nnz, A->nnz, sizeof(int) * A->N);
    // memcpy(B->value, A->value, sizeof(REAL_T) * A->N * A->M);
#pragma omp parallel for
    for (int i = 0; i < N; i++)
    {
        memcpy(&B_index[ROWMAJOR(i, 0, N, M)], &A_index[ROWMAJOR(i, 0, N, M)],
               M * sizeof(int));
        memcpy(&B_value[ROWMAJOR(i, 0, N, M)], &A_value[ROWMAJOR(i, 0, N, M)],
               M * sizeof(REAL_T));
        // A_nnz[perm[i]] = B_nnz[i];
    }
#endif
    /* Domains are only meaningful to copy when distribution modes match. */
    if (A->distribution_mode == B->distribution_mode)
    {
        bml_copy_domain(A->domain, B->domain);
        bml_copy_domain(A->domain2, B->domain2);
    }
}
/** Reorder an ellpack matrix.
*
* \ingroup copy_group
*
* \param A The matrix to be reordered
* \param perm The permutation vector
*/
/* Permute rows of A by perm (via a host-side copy), then remap the stored
 * column indices through the same permutation. */
void TYPED_FUNC(
    bml_reorder_ellpack) (
    bml_matrix_ellpack_t * A,
    int *perm)
{
    int N = A->N;
    int M = A->M;
    int *A_index = A->index;
    int *A_nnz = A->nnz;
    REAL_T *A_value = A->value;

    /* Work from a copy so rows can be permuted in place on A. */
    bml_matrix_ellpack_t *B = bml_copy_new(A);
    int *B_index = B->index;
    int *B_nnz = B->nnz;
    REAL_T *B_value = B->value;

#ifdef USE_OMP_OFFLOAD
    /* Pull current data from the device; the permutation runs on the host. */
#pragma omp target update from(A_nnz[:N], A_index[:N*M], A_value[:N*M])
#pragma omp target update from(B_nnz[:N], B_index[:N*M], B_value[:N*M])
#endif

    // Reorder rows - need to copy
#pragma omp parallel for
    for (int i = 0; i < N; i++)
    {
        memcpy(&A_index[ROWMAJOR(perm[i], 0, N, M)],
               &B_index[ROWMAJOR(i, 0, N, M)], M * sizeof(int));
        memcpy(&A_value[ROWMAJOR(perm[i], 0, N, M)],
               &B_value[ROWMAJOR(i, 0, N, M)], M * sizeof(REAL_T));
        A_nnz[perm[i]] = B_nnz[i];
    }
    bml_deallocate_ellpack(B);

    // Reorder elements in each row - just change index
#pragma omp parallel for
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < A_nnz[i]; j++)
        {
            A_index[ROWMAJOR(i, j, N, M)] =
                perm[A_index[ROWMAJOR(i, j, N, M)]];
        }
    }

#ifdef USE_OMP_OFFLOAD
    /* Bug fix: push the reordered matrix A back to the device. The previous
     * code updated the B_* arrays, which belong to the matrix deallocated
     * above, leaving A's device copy stale. */
#pragma omp target update to(A_nnz[:N], A_index[:N*M], A_value[:N*M])
#endif
}
|
dense_error_approximation.h | #ifndef __DENSE_ERROR_APPROXIMATION_H__
#define __DENSE_ERROR_APPROXIMATION_H__
#include <h2opus/core/hmatrix.h>
#include <h2opus/util/blas_wrappers.h>
#include <h2opus/util/debug_routines.h>
// Draw `samples` distinct indices from [0, elements) using a partial
// Fisher-Yates shuffle over an index pool. Uses rand(), so results follow
// the C library PRNG state.
inline std::vector<int> FisherYatesShuffle(int samples, int elements)
{
    std::vector<int> pool(elements);
    for (int idx = 0; idx < elements; idx++)
        pool[idx] = idx;

    std::vector<int> picked(samples);
    for (int k = 0; k < samples; k++)
    {
        const int last = elements - k - 1;
        // Pick a position in the not-yet-consumed prefix of the pool.
        const int r = (int)((float)rand() / (float)(RAND_MAX) * last);
        picked[k] = pool[r];
        // Retire the chosen slot by swapping it to the consumed tail.
        std::swap(pool[r], pool[last]);
    }
    return picked;
}
// Estimate the relative 2-norm error of an H-matrix matvec by sampling
// `percentage_rows * matrix_dim` rows, recomputing y = A*x densely for each
// sampled row against y_h, and scaling the sampled MSE up to the full
// dimension. NOTE(review): assumes samples > 0 — percentage_rows == 0 would
// divide by zero below; confirm callers guarantee this.
template <class MatGen>
inline H2Opus_Real getHgemvApproximationError(int matrix_dim, MatGen &mat_gen, H2Opus_Real percentage_rows,
                                              H2Opus_Real *y_h, H2Opus_Real *x)
{
    int samples = percentage_rows * matrix_dim;
    double total_err = 0;
    std::vector<int> row_samples = FisherYatesShuffle(samples, matrix_dim);

#pragma omp parallel for reduction(+ : total_err)
    for (int sample = 0; sample < samples; sample++)
    {
        int i = row_samples[sample];
        double y_sample = 0;
        // Dense recomputation of row i of A*x.
        for (int j = 0; j < matrix_dim; j++)
            y_sample += mat_gen.generateEntry(i, j) * x[j];
        double err = (double)(y_sample - y_h[i]);
        total_err += err * err;
    }
    // Scale sampled mean squared error to the full dimension.
    total_err /= samples;
    total_err *= matrix_dim;

    double y_norm = 0;
    for (int i = 0; i < matrix_dim; i++)
        y_norm += y_h[i] * y_h[i];
    // printf("Norm y: %e\n", sqrt(y_norm));

    return (H2Opus_Real)sqrt(total_err / y_norm);
    // return (H2Opus_Real)sqrt(total_err);
}
// Maximum relative entrywise error of y_h against a dense recomputation of
// A*x; rows whose exact value is 0 are skipped to avoid division by zero.
template <class MatGen>
inline H2Opus_Real getHgemvApproximationInfinityError(int matrix_dim, MatGen &mat_gen, H2Opus_Real *y_h, H2Opus_Real *x)
{
    double max_error = 0;

#pragma omp parallel for reduction(max : max_error)
    for (int i = 0; i < matrix_dim; i++)
    {
        double y_sample = 0;
        for (int j = 0; j < matrix_dim; j++)
            y_sample += mat_gen.generateEntry(i, j) * x[j];
        if (y_sample != 0)
        {
            double err = fabs((double)(y_sample - y_h[i]) / y_sample);
            if (err > max_error)
                max_error = err;
        }
    }
    return max_error;
}
// Relative 2-norm error of y_h against a dense recomputation of y = A*x:
// sqrt(sum (y_i - y_h[i])^2 / sum y_i^2).
template <class MatGen>
inline H2Opus_Real getHgemvApproximation2NormError(int matrix_dim, MatGen &mat_gen, H2Opus_Real *y_h, H2Opus_Real *x)
{
    double total_error = 0;
    double y_norm = 0;

    // Bug fix: y_norm is accumulated by every iteration too, so it must be
    // part of the reduction clause; previously it was implicitly shared and
    // updated without synchronization (a data race under OpenMP).
#pragma omp parallel for reduction(+ : total_error, y_norm)
    for (int i = 0; i < matrix_dim; i++)
    {
        double y_sample = 0;
        for (int j = 0; j < matrix_dim; j++)
            y_sample += mat_gen.generateEntry(i, j) * x[j];
        double err = (double)(y_sample - y_h[i]);
        total_error += err * err;
        y_norm += y_sample * y_sample;
    }
    return sqrt(total_error / y_norm);
}
// Estimate the relative Frobenius-norm error of the H-matrix by sampling up
// to `level_samples` coupling nodes per level, expanding U * S * V' for each
// sampled node, and comparing entries against mat_gen. Nodes larger than
// `largest_dim` in either direction are skipped.
// NOTE(review): the omp parallel for below has ordering hazards — the
// fallback `level_errors[level] = level_errors[level + 1]` reads a value
// another iteration may not have produced yet, and rand() (inside
// FisherYatesShuffle) and printf are called from multiple threads; confirm
// whether exact reproducibility is expected here.
template <class MatGen>
inline H2Opus_Real getApproximationErrorEstimate(HMatrix &hmatrix, MatGen &mat_gen, int largest_dim, int level_samples)
{
    BasisTree &u_basis_tree = hmatrix.u_basis_tree;
    BasisTree &v_basis_tree = (hmatrix.sym ? u_basis_tree : hmatrix.v_basis_tree);

    // Now expand the basis trees
    int max_rank = hmatrix.u_basis_tree.level_data.getLargestRank();
    H2Opus_Real *u_matrices = (H2Opus_Real *)malloc(sizeof(H2Opus_Real) * hmatrix.n * max_rank * u_basis_tree.depth);
    H2Opus_Real *v_matrices = (H2Opus_Real *)malloc(sizeof(H2Opus_Real) * hmatrix.n * max_rank * v_basis_tree.depth);
    assert(u_matrices && v_matrices);

    expandBasis(u_basis_tree, 0, u_matrices, 0, hmatrix.n, max_rank);
    expandBasis(v_basis_tree, 0, v_matrices, 0, hmatrix.n, max_rank);

    std::vector<H2Opus_Real> level_errors(hmatrix.hnodes.depth, 0);

#pragma omp parallel for
    for (int level = hmatrix.hnodes.depth - 1; level >= 0; level--)
    {
        // Per-iteration scratch for S*V' and U*(S*V').
        H2Opus_Real *sv_matrix = (H2Opus_Real *)malloc(sizeof(H2Opus_Real) * largest_dim * max_rank);
        H2Opus_Real *usv_matrix = (H2Opus_Real *)malloc(sizeof(H2Opus_Real) * largest_dim * largest_dim);

        int level_start, level_end, level_size;
        hmatrix.hnodes.getCouplingLevelRange(level, level_start, level_end);
        level_size = hmatrix.hnodes.getCouplingLevelSize(level);

        // Sample a subset of this level's coupling nodes.
        std::vector<int> node_samples = FisherYatesShuffle(std::min(level_samples, level_size), level_size);
        H2Opus_Real level_error = 0;
        int averaged_nodes = 0;

        for (int node_sample = 0; node_sample < node_samples.size(); node_sample++)
        {
            int node_index = level_start + node_samples[node_sample];
            assert(node_index < level_end);

            int tree_index = hmatrix.hnodes.rank_leaf_tree_index[node_index];
            int u_index = hmatrix.hnodes.node_u_index[tree_index];
            int v_index = hmatrix.hnodes.node_v_index[tree_index];

            int u_1 = u_basis_tree.node_start[u_index], v_1 = v_basis_tree.node_start[v_index];
            // int u_2 = u_1 + u_basis_tree.node_len[u_index] - 1, v_2 = v_1 + v_basis_tree.node_len[v_index] - 1;

            H2Opus_Real *s_matrix = hmatrix.hnodes.getCouplingMatrix(level, node_index - level_start);
            H2Opus_Real *u_matrix = u_matrices + hmatrix.n * max_rank * level + u_1;
            H2Opus_Real *v_matrix = v_matrices + hmatrix.n * max_rank * level + v_1;

            int v_len = v_basis_tree.node_len[v_index];
            int v_dim = v_basis_tree.getLevelRank(level);
            int u_len = u_basis_tree.node_len[u_index];

            // Skip nodes that do not fit in the scratch buffers.
            if (v_len > largest_dim || u_len > largest_dim)
                continue;
            averaged_nodes++;

            // Calculate S * V'
            h2opus_fbl_gemm(H2OpusFBLNoTrans, H2OpusFBLTrans, v_dim, v_len, v_dim, 1, s_matrix, v_dim, v_matrix,
                            hmatrix.n, 0, sv_matrix, v_dim);
            // Calculate U * (S * V')
            h2opus_fbl_gemm(H2OpusFBLNoTrans, H2OpusFBLNoTrans, u_len, v_len, v_dim, 1, u_matrix, hmatrix.n, sv_matrix,
                            v_dim, 0, usv_matrix, u_len);

            // Accumulate the squared entrywise difference for this block.
            for (int j = 0; j < u_len; j++)
            {
                for (int k = 0; k < v_len; k++)
                {
                    int full_matrix_i = u_1 + j;
                    int full_matrix_j = v_1 + k;

                    H2Opus_Real hmatrix_entry = usv_matrix[j + k * u_len];
                    // for(int l = 0; l < v_dim; l++)
                    //     hmatrix_entry += u_matrix[j + l * hmatrix.n] * sv_matrix[l + k * v_dim];
                    H2Opus_Real full_entry = mat_gen.generateEntry(full_matrix_i, full_matrix_j);
                    H2Opus_Real entry_error = (hmatrix_entry - full_entry);
                    level_error += entry_error * entry_error;
                }
            }
        }

        if (averaged_nodes == 0)
        {
            // Fall back to the error of the level below when every sampled
            // node was skipped (see race note above).
            assert(level != hmatrix.hnodes.depth - 1);
            level_errors[level] = level_errors[level + 1];
        }
        else
        {
            level_errors[level] = level_error / averaged_nodes;
        }
        printf("Level %d error: %e\n", level, level_errors[level]);

        free(sv_matrix);
        free(usv_matrix);
    }

    // Extrapolate: weight each level's mean node error by its node count.
    H2Opus_Real error = 0.0;
    for (int level = 0; level < hmatrix.hnodes.depth; level++)
    {
        int level_size = hmatrix.hnodes.getCouplingLevelSize(level);
        error += level_errors[level] * level_size;
    }

    free(u_matrices);
    free(v_matrices);

    H2Opus_Real hfrob_norm = frobeniusHNorm(hmatrix);
    return sqrt(error) / hfrob_norm;
}
// Exact relative Frobenius-norm error of the H-matrix's low-rank part:
// expands every coupling node as U * S * V' and accumulates the squared
// entrywise difference against mat_gen. Dense per-node expansion — intended
// for validation on small problems.
template <class MatGen> inline H2Opus_Real getApproximationError(HMatrix &hmatrix, MatGen &mat_gen)
{
    BasisTree &u_basis_tree = hmatrix.u_basis_tree;
    BasisTree &v_basis_tree = (hmatrix.sym ? u_basis_tree : hmatrix.v_basis_tree);

    // Now expand the basis trees
    int max_rank = hmatrix.u_basis_tree.level_data.getLargestRank();
    H2Opus_Real *u_matrices = (H2Opus_Real *)malloc(sizeof(H2Opus_Real) * hmatrix.n * max_rank * u_basis_tree.depth);
    H2Opus_Real *v_matrices = (H2Opus_Real *)malloc(sizeof(H2Opus_Real) * hmatrix.n * max_rank * v_basis_tree.depth);
    assert(u_matrices && v_matrices);

    expandBasis(u_basis_tree, 0, u_matrices, 0, hmatrix.n, max_rank);
    expandBasis(v_basis_tree, 0, v_matrices, 0, hmatrix.n, max_rank);

    H2Opus_Real error = 0.0;
    for (int level = 0; level < hmatrix.hnodes.depth; level++)
    {
        int level_start, level_end;
        hmatrix.hnodes.getCouplingLevelRange(level, level_start, level_end);

        for (int node_index = level_start; node_index < level_end; node_index++)
        {
            int tree_index = hmatrix.hnodes.rank_leaf_tree_index[node_index];
            int u_index = hmatrix.hnodes.node_u_index[tree_index];
            int v_index = hmatrix.hnodes.node_v_index[tree_index];

            int u_1 = u_basis_tree.node_start[u_index], v_1 = v_basis_tree.node_start[v_index];
            // int u_2 = u_1 + u_basis_tree.node_len[u_index] - 1, v_2 = v_1 + v_basis_tree.node_len[v_index] - 1;

            H2Opus_Real *s_matrix = hmatrix.hnodes.getCouplingMatrix(level, node_index - level_start);
            H2Opus_Real *u_matrix = u_matrices + hmatrix.n * max_rank * level + u_1;
            H2Opus_Real *v_matrix = v_matrices + hmatrix.n * max_rank * level + v_1;

            int v_len = v_basis_tree.node_len[v_index];
            int v_dim = v_basis_tree.getLevelRank(level);
            H2Opus_Real *sv_matrix = (H2Opus_Real *)malloc(sizeof(H2Opus_Real) * v_len * v_dim);

            // Calculate S * V'
            for (int j = 0; j < v_dim; j++)
            {
                for (int k = 0; k < v_len; k++)
                {
                    int index = j + k * v_dim;
                    sv_matrix[index] = 0;
                    for (int l = 0; l < v_dim; l++)
                    {
                        int s_index = j + l * v_dim;
                        sv_matrix[index] += s_matrix[s_index] * v_matrix[k + l * hmatrix.n];
                    }
                }
            }

            // Calculate U * (S * V')
            int u_len = u_basis_tree.node_len[u_index];
            for (int j = 0; j < u_len; j++)
            {
                for (int k = 0; k < v_len; k++)
                {
                    int full_matrix_i = u_1 + j;
                    int full_matrix_j = v_1 + k;

                    // Expand one entry of U * (S * V') on the fly.
                    H2Opus_Real hmatrix_entry = 0;
                    for (int l = 0; l < v_dim; l++)
                        hmatrix_entry += u_matrix[j + l * hmatrix.n] * sv_matrix[l + k * v_dim];
                    H2Opus_Real full_entry = mat_gen.generateEntry(full_matrix_i, full_matrix_j);
                    H2Opus_Real entry_error = (hmatrix_entry - full_entry);
                    // if(full_entry != 0) entry_error /= full_entry;
                    error += entry_error * entry_error;
                }
            }
            free(sv_matrix);
        }
    }

    free(u_matrices);
    free(v_matrices);

    H2Opus_Real hfrob_norm = frobeniusHNorm(hmatrix);
    return sqrt(error) / hfrob_norm;
}
#endif
|
omp.c | /***********************************************************
* Edgar A. Leon
* Lawrence Livermore National Laboratory
***********************************************************/
#include <stdio.h>
#include <string.h>
#include <omp.h>
#include "affinity.h"
/* Report CPU (and optionally GPU) affinity for the process and then for
 * each OpenMP thread. Pass -v for per-thread verbose GPU info. */
int main(int argc, char *argv[])
{
    char buf[LONG_STR_SIZE];
    int i;
    int ncpus = get_num_cpus();
    int verbose = 0;
    int nc = 0;

    /* Get rid of compiler warning. Ay. */
    (void) verbose;

    /* Command-line options */
    if (argc > 1)
        for (i=1; i<argc; i++) {
            if ( strcmp(argv[i], "-v") == 0 )
                verbose = 1;
        }

    nc += sprintf(buf+nc, "Process runing on %d CPUs: ", ncpus);
    nc += get_cpu_affinity(buf+nc);

#ifdef HAVE_GPUS
    int ndevs = get_gpu_count();
    nc += sprintf(buf+nc, "Process has %d GPUs: ", ndevs);
    nc += get_gpu_affinity(buf+nc);
    nc += get_gpu_info_all(buf+nc);
#endif

    /* Print the process information */
    printf("\n%s", buf);

    /* Clear buffer for reuse */
    nc = 0;
    buf[0] = '\0';

    /* Each thread gets its own copy of buf/nc (firstprivate of the cleared
     * buffer) and builds its whole report before a single printf, so lines
     * from different threads are not interleaved mid-report. */
#pragma omp parallel firstprivate(buf, nc) private(ncpus) shared(verbose)
    {
        int tid = omp_get_thread_num();
        int nthreads = omp_get_num_threads();
        /* private(ncpus) is uninitialized on entry; re-queried per thread. */
        ncpus = get_num_cpus();

        nc += sprintf(buf+nc, "Thread %3d/%3d running on %d CPUs: ",
                      tid, nthreads, ncpus);
        nc += get_cpu_affinity(buf+nc);

#ifdef HAVE_GPUS
        /* Round-robin GPU assignment by thread id; ndevs is shared. */
        int dev = tid % ndevs;
        nc += sprintf(buf+nc, "Thread %3d/%3d assigned to GPU: 0x%x\n",
                      tid, nthreads, get_gpu_pci_id(dev));
        if (verbose)
            nc += get_gpu_info(dev, buf+nc);
#endif
        printf("%s", buf);
    }

    return 0;
}
|
leet_cc_fmt_plug.c | /* Cracker for leet.cc hashes.
*
* hsh = bin2hex(hash("sha512", $password . $salt, true) ^ hash("whirlpool", $salt . $password, true))
* $salt == username
*
* Input hash format: username:hash
*
* This software is Copyright (c) 2016, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_leet;
#elif FMT_REGISTERS_H
john_register_one(&fmt_leet);
#else
#include "arch.h"
#include "openssl_local_overrides.h"
#include <openssl/opensslv.h>
#include <string.h>
#if (AC_BUILT && HAVE_WHIRLPOOL) || \
(!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL)
#include <openssl/whrlpool.h>
#define WP_TYPE "OpenSSL"
#define sph_whirlpool_context WHIRLPOOL_CTX
#define sph_whirlpool_init(a) WHIRLPOOL_Init(a)
#define sph_whirlpool(a,b,c) WHIRLPOOL_Update(a,b,c)
#define sph_whirlpool_close(b,a) WHIRLPOOL_Final(a,b)
#else
#define WP_TYPE "SPH"
#include "sph_whirlpool.h"
#endif
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
#ifdef _OPENMP
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 128 // tuned on Core i7-6600U
#endif
#endif
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#ifdef SIMD_COEF_64
#define SHA512_TYPE SHA512_ALGORITHM_NAME
#else
#define SHA512_TYPE "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#ifdef SIMD_COEF_64
#define PLAINTEXT_LENGTH (111-32)
#define MAX_SALT_LEN 32
#else
#define PLAINTEXT_LENGTH 125
#define MAX_SALT_LEN 256
#endif
#define FORMAT_LABEL "leet"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA-512(" SHA512_TYPE ") + Whirlpool(" WP_TYPE "/" ARCH_BITS_STR ")"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 64
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_64)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests leet_tests[] = {
{"salt$f86036a85e3ff84e73bf10769011ecdbccbf5aaed9df0240310776b42f5bb8776e612ab15a78bbfc39e867448a08337d97427e182e72922bbaa903ee75b2bfd4", "password"},
{"Babeface$3e6380026fc262465934fd5352659c874e611cbf3229cdbf1407c3bae4c6f0b9c437470d202bccc65cf82faf883d299f1ab30ed841cd8f2472c58f4f05ac6ca3", "john"},
{"user$b8baf965f515e41c9bf4bc31f0652f27b746c3155f79bc39d2ba8557a8e4a803fd4c0418d577957044bd403d98847750231cb9f03fb213dcddf73304180309dc", "ripper"},
{"harvey$581e6f9aee99df55bb815bb608707a640a8deae3bad343d0421822518f2c9d8a053221356894628e30f70bf91d36ca2a7300407ec6686fefaa46cbad07b0f78e", "openwall"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static ARCH_WORD_64 (*crypt_out)[1];
static struct custom_salt {
int saltlen;
unsigned char salt[MAX_SALT_LEN];
} *cur_salt;
/* Format init: scale the key batch to the OpenMP thread count and allocate
 * the shared plaintext/length/digest arrays sized to max_keys_per_crypt. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    int omp_t = omp_get_max_threads();

    self->params.min_keys_per_crypt *= omp_t;
    /* Oversubscribe by OMP_SCALE batches per thread for throughput. */
    omp_t *= OMP_SCALE;
    self->params.max_keys_per_crypt *= omp_t;
#endif
    saved_key = mem_calloc_align(sizeof(*saved_key),
            self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
    saved_len = mem_calloc(self->params.max_keys_per_crypt,
            sizeof(*saved_len));
    crypt_out = mem_calloc_align(sizeof(*crypt_out), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Release the buffers allocated in init(), in reverse allocation order. */
static void done(void)
{
    MEM_FREE(crypt_out);
    MEM_FREE(saved_len);
    MEM_FREE(saved_key);
}
// salt (username) is added to the ciphertext in the prepare function
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q;
p = ciphertext;
q = strchr(p, '$'); // end of salt
if (!q)
return 0;
if (q - p > 256)
return 0;
q = strrchr(ciphertext, '$') + 1;
if (strlen(q) != BINARY_SIZE * 2)
goto err;
if (!ishex(q))
goto err;
return 1;
err:
return 0;
}
/*
 * Join the login field (split_fields[0], used as the salt) and the raw
 * hash field into canonical "salt$hash" form.  Returns the unmodified
 * hash field when no login is present or the joined form fails valid().
 */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	char *cp;

	if (!split_fields[0])
		return split_fields[1];
	if (strlen(split_fields[1]) != BINARY_SIZE * 2)
		return split_fields[1];
	cp = mem_alloc_tiny(strlen(split_fields[0]) + strlen(split_fields[1]) + 2, MEM_ALIGN_NONE);
	sprintf(cp, "%s$%s", split_fields[0], split_fields[1]);
	if (valid(cp, self))
		return cp;
	/* cp comes from the "tiny" region allocator, so it must NOT be given
	 * to MEM_FREE()/free() -- the pool is released wholesale at shutdown.
	 * The previous MEM_FREE(cp) here freed an interior pool pointer,
	 * which risks heap corruption; the small leak on the invalid path is
	 * intentional and bounded. */
	return split_fields[1];
}
/*
 * Extract the salt (everything before the last '$') into a static
 * custom_salt.  The copy is clamped to sizeof(cs.salt): valid() permits
 * salts up to 256 bytes, which may exceed MAX_SALT_LEN, and the previous
 * unbounded strncpy could overrun the buffer.
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *q;
	size_t len;

	memset(&cs, 0, sizeof(cs));
	q = strrchr(ciphertext, '$');
	len = (size_t)(q - ciphertext);
	if (len > sizeof(cs.salt))
		len = sizeof(cs.salt);
	memcpy(cs.salt, ciphertext, len);
	cs.saltlen = (int)len;
	return (void *)&cs;
}
/* Decode the hex digest after the last '$' into a static, word-aligned
   byte buffer and return it. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE + 1];
		ARCH_WORD dummy;	/* forces word alignment */
	} buf;
	unsigned char *out = buf.c;
	char *p = strrchr(ciphertext, '$') + 1;
	int i;

	for (i = 0; i < BINARY_SIZE; i++, p += 2)
		out[i] = (atoi16[ARCH_INDEX(p[0])] << 4) |
		         atoi16[ARCH_INDEX(p[1])];
	return out;
}
/* Partial-hash lookups: low bits of the stored 64-bit result word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Remember the active salt for subsequent crypt_all()/cmp_exact() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/*
 * Compute crypt_out[i] = first 64 bits of
 *   SHA512(key . salt) XOR WHIRLPOOL(salt . key)
 * for every queued candidate.  With SIMD, SHA-512 runs on a whole batch
 * of flat 128-byte blocks at once; Whirlpool is always scalar.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		sph_whirlpool_context wctx;
		int i;
		union {
			unsigned char buf[BINARY_SIZE];
			ARCH_WORD_64 p64[1];
		} output1[MAX_KEYS_PER_CRYPT], output2;
#ifdef SIMD_COEF_64
		// Not sure why JTR_ALIGN(MEM_ALIGN_SIMD) does not work here
		// but if used, it cores travis-ci, so we use mem_align instead
		unsigned char _in[8*16*MAX_KEYS_PER_CRYPT+MEM_ALIGN_SIMD];
		unsigned char _out[8*8*MAX_KEYS_PER_CRYPT+MEM_ALIGN_SIMD];
		ARCH_WORD_64 *in = mem_align(_in, MEM_ALIGN_SIMD);
		ARCH_WORD_64 *out = mem_align(_out, MEM_ALIGN_SIMD);
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			/* Build a single flat 128-byte SHA-512 block: key . salt
			   . 0x80 padding, bit length in the last 64-bit word.
			   Assumes key+salt always fit one block -- TODO confirm
			   PLAINTEXT_LENGTH/MAX_SALT_LEN guarantee this. */
			char *cp = &((char*)in)[128*i];
			memcpy(cp, saved_key[index+i], saved_len[index+i]);
			memcpy(&cp[saved_len[index+i]], cur_salt->salt, cur_salt->saltlen);
			cp[saved_len[index+i]+cur_salt->saltlen] = 0x80;
			in[i*16+15] = (saved_len[index+i]+cur_salt->saltlen)<<3;
			memset(&cp[saved_len[index+i]+cur_salt->saltlen+1], 0, 120-(saved_len[index+i]+cur_salt->saltlen+1));
		}
		SIMDSHA512body(in, out, NULL, SSEi_FLAT_IN);
		/* pull the first 64-bit word of each lane out, byte-swapped */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
			output1[i].p64[0] = JOHNSWAP64(out[((i/SIMD_COEF_64)*8*SIMD_COEF_64+i%SIMD_COEF_64)]);
#else
		SHA512_CTX sctx;
		SHA512_Init(&sctx);
		SHA512_Update(&sctx, saved_key[index], saved_len[index]);
		SHA512_Update(&sctx, cur_salt->salt, cur_salt->saltlen);
		SHA512_Final(output1[0].buf, &sctx);
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			/* note the reversed order: salt first, then key */
			sph_whirlpool_init(&wctx);
			sph_whirlpool(&wctx, cur_salt->salt, cur_salt->saltlen);
			sph_whirlpool(&wctx, saved_key[index+i], saved_len[index+i]);
			sph_whirlpool_close(&wctx, output2.buf);
			crypt_out[index+i][0] = output1[i].p64[0] ^ output2.p64[0];
		}
	}
	return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (((ARCH_WORD_64*)binary)[0] == crypt_out[index][0])
return 1;
return 0;
}
/* Compare the 64-bit partial binary against one computed result. */
static int cmp_one(void *binary, int index)
{
	return ((ARCH_WORD_64*)binary)[0] == crypt_out[index][0];
}
/* Recompute the full SHA512(key.salt) XOR WHIRLPOOL(salt.key) digest and
   compare all BINARY_SIZE bytes against the stored ciphertext. */
static int cmp_exact(char *source, int index)
{
	// don't worry about SIMD here.
	// we already are 64 bit 'sure'. This extra check
	// is not really needed, but does not hurt much
	SHA512_CTX sctx;
	int i;
	void *bin = get_binary(source);
	sph_whirlpool_context wctx;
	unsigned char output1[BINARY_SIZE], output2[BINARY_SIZE];

	SHA512_Init(&sctx);
	SHA512_Update(&sctx, saved_key[index], saved_len[index]);
	SHA512_Update(&sctx, cur_salt->salt, cur_salt->saltlen);
	SHA512_Final(output1, &sctx);
	sph_whirlpool_init(&wctx);
	sph_whirlpool(&wctx, cur_salt->salt, cur_salt->saltlen);
	sph_whirlpool(&wctx, saved_key[index], saved_len[index]);
	sph_whirlpool_close(&wctx, output2);
	/* XOR the two digests in place, then compare */
	for (i = 0; i < BINARY_SIZE; ++i)
		output1[i] ^= output2[i];
	return !memcmp(output1, bin, BINARY_SIZE);
}
/* Store one candidate plaintext, recording its (truncated) length. */
static void leet_set_key(char *key, int index)
{
	saved_len[index] =
		strnzcpyn(saved_key[index], key, sizeof(saved_key[index]));
}
/* Return the stored plaintext for a candidate slot. */
static char *get_key(int index)
{
	return saved_key[index];
}
// Public domain hash function by DJ Bernstein
/* Bucket hash over the salt for the salt table.  NOTE(review): only the
   first saltlen/4 bytes are mixed in -- presumably a deliberate speed
   shortcut, but confirm it was not meant to iterate 32-bit words. */
static int salt_hash(void *salt)
{
	unsigned int hash = 5381;
	struct custom_salt *fck = (struct custom_salt *)salt;
	unsigned char *s = fck->salt;
	int length = fck->saltlen / 4;

	while (length) {
		hash = ((hash << 5) + hash) ^ *s++;
		length--;
	}
	return hash & (SALT_HASH_SIZE - 1);
}
/* Format descriptor registered with the John core: static parameters
   first, then the method table wired to the functions above. */
struct fmt_main fmt_leet = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			NULL,
		},
		{ NULL },
		leet_tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			NULL
		},
		fmt_default_source,
		{
			/* binary hashes come straight from the stored 64-bit word */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		leet_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
stream.c | /*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 1.3 2015-03-02 08:36:29 frmh84 Exp $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <unistd.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means that each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
#ifndef STREAM_ARRAY_SIZE
// # define STREAM_ARRAY_SIZE 10000000
# define STREAM_ARRAY_SIZE 80000000
#endif
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
#ifdef NTIMES
#if NTIMES<=1
# define NTIMES 10
#endif
#endif
#ifndef NTIMES
# define NTIMES 10
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
#ifndef OFFSET
# define OFFSET 0
#endif
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to mccalpin@cs.virginia.edu
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
/* The three benchmark arrays; static, so sized at compile time. */
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
	b[STREAM_ARRAY_SIZE+OFFSET],
	c[STREAM_ARRAY_SIZE+OFFSET];
/* Per-kernel timing accumulators (Copy, Scale, Add, Triad). */
static double avgtime[4] = {0}, maxtime[4] = {0},
	mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
	"Add: ", "Triad: "};
/* Bytes moved per iteration: Copy/Scale touch 2 arrays, Add/Triad 3. */
static double bytes[4] = {
	2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
	2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
	3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
	3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
	};
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
/* Driver: report configuration, calibrate the timer, run the four STREAM
   kernels NTIMES times, and print best-rate statistics. */
int
main()
    {
    int			quantum, checktick();
    int			BytesPerWord;
    int			k;
    ssize_t		j;
    STREAM_TYPE		scalar;
    double		t, times[4][NTIMES];

    /* --- SETUP --- determine precision and check timing --- */

    printf(HLINE);
    printf("STREAM version $Revision: 1.3 $\n");
    printf(HLINE);
    BytesPerWord = sizeof(STREAM_TYPE);
    printf("This system uses %d bytes per array element.\n",
	BytesPerWord);

    printf(HLINE);
#ifdef N
    printf("*****  WARNING: ******\n");
    printf("      It appears that you set the preprocessor variable N when compiling this code.\n");
    printf("      This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
    printf("      Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
    printf("*****  WARNING: ******\n");
#endif

    printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
    printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
	BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
	BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
    printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
	(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
	(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
    printf("Each kernel will be executed %d times.\n", NTIMES);
    printf(" The *best* time for each kernel (excluding the first iteration)\n");
    printf(" will be used to compute the reported bandwidth.\n");

#ifdef _OPENMP
    printf(HLINE);
#pragma omp parallel
    {
#pragma omp master
	{
	    /* k is shared; only the master thread writes it here */
	    k = omp_get_num_threads();
	    printf ("Number of Threads requested = %i\n",k);
        }
    }
#endif

#ifdef _OPENMP
    /* count the threads actually spawned, one atomic increment each */
    k = 0;
#pragma omp parallel
#pragma omp atomic
	k++;
    printf ("Number of Threads counted = %i\n",k);
#endif

    /* Get initial value for system clock. */
    /* first-touch initialization in parallel, so pages land on the
       NUMA node of the thread that will later use them */
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
	    a[j] = 1.0;
	    b[j] = 2.0;
	    c[j] = 0.0;
	}

    printf(HLINE);

    if  ( (quantum = checktick()) >= 1)
	printf("Your clock granularity/precision appears to be "
	    "%d microseconds.\n", quantum);
    else {
	printf("Your clock granularity appears to be "
	    "less than one microsecond.\n");
	quantum = 1;
    }

    /* time one sweep over a[] to estimate per-test duration */
    t = mysecond();
#pragma omp parallel for
    for (j = 0; j < STREAM_ARRAY_SIZE; j++)
		a[j] = 2.0E0 * a[j];
    t = 1.0E6 * (mysecond() - t);

    printf("Each test below will take on the order"
	" of %d microseconds.\n", (int) t  );
    printf("   (= %d clock ticks)\n", (int) (t/quantum) );
    printf("Increase the size of the arrays if this shows that\n");
    printf("you are not getting at least 20 clock ticks per test.\n");

    printf(HLINE);

    printf("WARNING -- The above is only a rough guideline.\n");
    printf("For best results, please be sure you know the\n");
    printf("precision of your system timer.\n");
    printf(HLINE);

    /*	--- MAIN LOOP --- repeat test cases NTIMES times --- */

    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
	{
	times[0][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Copy();
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j];
#endif
	times[0][k] = mysecond() - times[0][k];

	times[1][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    b[j] = scalar*c[j];
#endif
	times[1][k] = mysecond() - times[1][k];

	times[2][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Add();
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j]+b[j];
#endif
	times[2][k] = mysecond() - times[2][k];

	times[3][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    a[j] = b[j]+scalar*c[j];
#endif
	times[3][k] = mysecond() - times[3][k];
	}

    /*	--- SUMMARY --- */

    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
	{
	for (j=0; j<4; j++)
	    {
	    avgtime[j] = avgtime[j] + times[j][k];
	    mintime[j] = MIN(mintime[j], times[j][k]);
	    maxtime[j] = MAX(maxtime[j], times[j][k]);
	    }
	}

    printf("Function    Best Rate MB/s  Avg time     Min time     Max time\n");
    for (j=0; j<4; j++) {
		avgtime[j] = avgtime[j]/(double)(NTIMES-1);

		/* best rate = bytes moved / fastest (minimum) time */
		printf("%s%12.1f  %11.6f  %11.6f  %11.6f\n", label[j],
	       1.0E-06 * bytes[j]/mintime[j],
	       avgtime[j],
	       mintime[j],
	       maxtime[j]);
    }
    printf(HLINE);

    /* --- Check Results --- */
    checkSTREAMresults();
    printf(HLINE);

    return 0;
}
# define M	20

/* Estimate the timer granularity in microseconds: collect M strictly
   increasing timestamps and return the smallest positive gap. */
int
checktick()
    {
    int		i, minDelta, Delta;
    double	t1, t2, timesfound[M];

/*  Collect a sequence of M unique time values from the system. */

    for (i = 0; i < M; i++) {
	t1 = mysecond();
	/* busy-wait until the clock visibly advances */
	while( ((t2=mysecond()) - t1) < 1.0E-6 )
	    ;
	timesfound[i] = t1 = t2;
	}

/*
 * Determine the minimum difference between these M values.
 * This result will be our estimate (in microseconds) for the
 * clock granularity.
 */

    minDelta = 1000000;
    for (i = 1; i < M; i++) {
	Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
	minDelta = MIN(minDelta, MAX(Delta,0));
	}

   return(minDelta);
    }
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
/*
 * Wall-clock time in seconds, via gettimeofday().
 * Fixes vs. the previous version: the unused local `i` is gone, the
 * (always-ignored) return value is explicitly discarded, and the
 * obsolete struct timezone argument is passed as NULL per POSIX.
 */
double mysecond()
{
	struct timeval tp;

	(void) gettimeofday(&tp, NULL);
	return ((double) tp.tv_sec + (double) tp.tv_usec * 1.e-6);
}
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
/* Recompute the expected final scalar values for a, b and c analytically,
   then compare the average per-element error of each array against a
   type-dependent epsilon, printing diagnostics on failure. */
void checkSTREAMresults ()
{
	STREAM_TYPE aj,bj,cj,scalar;
	STREAM_TYPE aSumErr,bSumErr,cSumErr;
	STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
	double epsilon;
	ssize_t	j;
	int	k,ierr,err;

    /* reproduce initialization */
	aj = 1.0;
	bj = 2.0;
	cj = 0.0;
    /* a[] is modified during timing check */
	aj = 2.0E0 * aj;
    /* now execute timing loop (scalar recurrence mirrors the kernels) */
	scalar = 3.0;
	for (k=0; k<NTIMES; k++)
        {
            cj = aj;
            bj = scalar*cj;
            cj = aj+bj;
            aj = bj+scalar*cj;
        }

    /* accumulate deltas between observed and expected results */
	aSumErr = 0.0;
	bSumErr = 0.0;
	cSumErr = 0.0;
	for (j=0; j<STREAM_ARRAY_SIZE; j++) {
		aSumErr += abs(a[j] - aj);
		bSumErr += abs(b[j] - bj);
		cSumErr += abs(c[j] - cj);
		// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj);	// MCCALPIN
	}
	aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
	bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
	cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;

	/* tolerance scales with the floating-point type in use */
	if (sizeof(STREAM_TYPE) == 4) {
		epsilon = 1.e-6;
	}
	else if (sizeof(STREAM_TYPE) == 8) {
		epsilon = 1.e-13;
	}
	else {
		printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
		epsilon = 1.e-6;
	}

	err = 0;
	if (abs(aAvgErr/aj) > epsilon) {
		err++;
		printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf ("     Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(a[j]/aj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf("         array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
						j,aj,a[j],abs((aj-a[j])/aAvgErr));
				}
#endif
			}
		}
		printf("     For array a[], %d errors were found.\n",ierr);
	}
	if (abs(bAvgErr/bj) > epsilon) {
		err++;
		printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf ("     Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
		printf ("     AvgRelAbsErr > Epsilon (%e)\n",epsilon);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(b[j]/bj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf("         array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
						j,bj,b[j],abs((bj-b[j])/bAvgErr));
				}
#endif
			}
		}
		printf("     For array b[], %d errors were found.\n",ierr);
	}
	if (abs(cAvgErr/cj) > epsilon) {
		err++;
		printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf ("     Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
		printf ("     AvgRelAbsErr > Epsilon (%e)\n",epsilon);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(c[j]/cj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf("         array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
						j,cj,c[j],abs((cj-c[j])/cAvgErr));
				}
#endif
			}
		}
		printf("     For array c[], %d errors were found.\n",ierr);
	}
	if (err == 0) {
		printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
	}
#ifdef VERBOSE
	printf ("Results Validation Verbose Results: \n");
	printf ("    Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
	printf ("    Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
	printf ("    Rel Errors on a, b, c:     %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
void tuned_STREAM_Copy()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
}
void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
}
void tuned_STREAM_Add()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
}
void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
|
buildingGABLS.c | #include "grid/octree.h"
#include "navier-stokes/centered.h"
#include "SGS/Vreman.h"
#include "tracer.h"
#include "diffusion.h"
#include "grid/adaptLES.h"
#define MAXLEVEL 5
#define temp 5		/* simulation end time (logfile runs while t <= temp) */
#define Re 1e3
/* Fully parenthesized: the old `x * x` form miscomputed sq(a + b) and
 * sq(*p) because of operator precedence. */
#define sq(x) ((x) * (x))
scalar T[];		/* temperature tracer field */
scalar * tracers = {T};
mgstats mgT;		/* multigrid statistics from the T diffusion solve */
face vector av[];	/* acceleration (body force) field */
double f = 1.39e-4;	/* presumably the Coriolis parameter [1/s] -- confirm */
/* Set boundary conditions and domain, then run the LES.  The bottom
   temperature cools at 0.25 K per hour from 265 K; the top is fixed. */
int main()
{
	T[bottom]=dirichlet((265-(0.25*(t/3600))));
	T[top] = dirichlet (268);
	u.t[bottom]=dirichlet(0);	/* no-slip bottom */
	u.t[top]=neumann(0);		/* stress-free top */
	periodic (right);
	periodic (front);
	L0 = 400.;			/* cubic domain, 400 units on a side */
	X0 = Y0 = Z0 = 0;
	DT = 10;			/* maximum timestep */
	init_grid (1 << MAXLEVEL);
	a=av;				/* register av as the acceleration term */
	run();
}
face vector muv[];	/* face eddy viscosity, filled by the SGS event */
event init(t=0)
{
	adaptLESinit(MAXLEVEL);
	mu=muv;
	foreach()
	{
		u.x[]= 8;	/* uniform initial wind in x */
		/* 265 (+ small noise) below y=50, 265 from 50 to 100, then a
		   linear 0.01/unit increase above 100.  NOTE(review): a cell
		   with y exactly 50 would get T = 0 from these indicator
		   products -- presumably cell centres never land there;
		   confirm. */
		T[]=((265+0.1*noise())*(y<50))+((y>50)*265) + (((0.01*(y-100)))*(y>100));
	}
	boundary(all);
}
/* Every iteration: diffuse T implicitly with the eddy viscosity, then set
   the body force: rotation terms (f) relaxing toward the 8 m/s wind, a
   buoyancy term in y, and damping of all components above y = 300.
   NOTE(review): av is a face vector but is filled in a cell (foreach)
   loop, and the buoyancy uses T itself rather than a deviation from a
   reference profile -- confirm both against the solver conventions. */
event acceleration (i++)
{
	face vector D = mu;
	mgT = diffusion (T, dt, D);
	foreach()
	{
		av.x[] = f*u.z[]-((y>300)*(u.x[]-8));
		av.y[] = 9.81*(T[]+T[0,-1,0])/(2*265)-((y>300)*u.y[]);
		av.z[] = f*(8-u.x[])-((y>300)*u.z[]);
	}
	boundary(all);
}
/* Every iteration: compute the Vreman eddy viscosity at cell centres and
   average it onto faces for use by the viscous and diffusion terms. */
event SGS (i++)
{
	scalar muB[];
	eddyviscosity(0.17,u,muB);	/* 0.17: model constant */
	boundary({muB});
	foreach_face()
	{
		muv.x[]=(muB[]+muB[-1,0,0])/2;
	}
	boundary((scalar *){muv});
}
/* Once per unit of model time (until t = temp): log timestep and
   domain-averaged eddy-viscosity / velocity statistics to stderr. */
event logfile (t <= temp; t += 1)
{
	stats s = statsf (u.x);
	stats m = statsf(mu.x);
	fprintf (ferr, "t = %g\ti = %d\tdt = %g\tmin(Evis) = %g\nmax(Evis) = %g\tavg(Evis) = %g\tavg(ux) = %g \n",t, i, dt,m.min ,m.max,(float)m.sum/(L0*L0*L0), (float)s.sum/(L0*L0*L0));
}
/*
 * Every unit of model time: write horizontally averaged profiles of T,
 * u.x, u.z, refinement level and cell height, sampled on an N^3 lattice,
 * one file per quantity per output time.
 */
event output (t += 1)
{
	float Delt = L0/N;
	char name[80];
	sprintf (name, "Tprof%d.dat",(int)roundf(t));
	FILE * fp4 = fopen (name, "w");
	sprintf (name, "uxprof%d.dat",(int)roundf(t));
	FILE * fp5 = fopen (name, "w");
	sprintf (name, "uzprof%d.dat",(int)roundf(t));
	FILE * fp6 = fopen (name, "w");
	sprintf (name, "LEVELprof%d.dat",(int)roundf(t));
	FILE * fp7 = fopen (name, "w");
	sprintf (name, "Yprof%d.dat",(int)roundf(t));
	FILE * fp8 = fopen (name, "w");
	/* The former `#pragma omp parallel for reduction(+:F)` was removed:
	 * F is declared inside the loop body, so the reduction clause did
	 * not even compile when _OPENMP was defined, and the unordered
	 * fprintf calls would have interleaved the five profile files.
	 * The sampling loop now runs serially. */
	for (int k = 0; k < N; k++)
	{
		double uz=0 , ux=0 , F= 0, LEVEL=0, yz=0;
		float yp = Delt * k + Y0 + Delt/2;
		for (int i = 0; i < N; i++)
		{
			float xp = Delt*i + X0 + Delt/2.;
			for (int j = 0; j < N; j++)
			{
				/* NOTE(review): zp is offset with X0; presumably Z0
				   was intended, though both are 0 here. */
				float zp = Delt*j + X0 + Delt/2.;
				Point point = locate (xp, yp,zp);
				F += T[];
				uz+= u.z[];
				ux+= u.x[];
				LEVEL += level;
				yz += y;
			}
		}
		fprintf(fp4,"%g\t%g \n",yp,F/(N*N));
		fprintf(fp5,"%g\t%g \n",yp,ux/(N*N));
		fprintf(fp6,"%g\t%g \n",yp,uz/(N*N));
		fprintf(fp7,"%g\t%g \n",yp,LEVEL/(N*N));
		fprintf(fp8,"%g\t%g \n",yp,yz/(N*N));
	}
	fclose(fp4);
	fclose(fp5);
	fclose(fp6);
	fclose(fp7);
	fclose(fp8);
}
/*
 * Walk a vertical column of sample points at the domain corner and
 * record, for each distinct cell encountered, its centre height (YY) and
 * refinement level (LL).  maxlevel sets the sampling resolution; entries
 * are de-duplicated by comparing y with the previous stored value.
 */
void levels(double YY[],int LL[],int maxlevel)
{
	int DD = pow(2,maxlevel), l = 0;
	float xp = Z0+(L0/(DD*2)), zp = Z0 + (L0/(DD*2));
	for (int k=0;k<DD;k++)
	{
		float yp = Y0+(L0/(DD*2))+k*(L0/DD);
		Point point = locate (xp, yp,zp);
		if (k==0)
		{
			YY[l]=y;
			LL[l]=level;
			l++;
		}
		else if (y!=YY[l-1] )
		{
			YY[l]=y;
			LL[l]=level;
			l++;
		}
	}
}
/* Every unit of model time: probe the current column structure, log it,
   estimate the equivalent uniform grid size, and re-adapt the mesh. */
event adapt (t+=1)
{
	double Yz[1<<MAXLEVEL] = {0};
	int Lz[1<<MAXLEVEL] = {0};
	levels(Yz,Lz,MAXLEVEL);
	int m=0 , n=0;
	/* entries are valid while the stored height is positive */
	while(Yz[m]>0.01)
	{
		fprintf(ferr,"\t%d\t%g\t%d \n ",m+1,Yz[m] ,Lz[m]);
		n+=pow(2,Lz[m]*2);	/* cells per horizontal slab at this level */
		m++;
	}
	fprintf(ferr,"\t#gridcells ~ %g ^ 3\n",round(pow(n,(0.33333))));
	adaptLESrun(MAXLEVEL,u,T,mu.y,Lz,Yz);
	boundary(all);
}
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
typedef struct _TransformPacket
{
MagickRealType
x,
y,
z;
} TransformPacket;
/*
Forward declarations.
*/
static MagickBooleanType
TransformsRGBImage(Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o l o r s p a c e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageColorspaceType() returns the potential type of image:
% sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
% To ensure the image type matches its potential, use SetImageColorspaceType():
%
% (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
% exception);
%
% The format of the GetImageColorspaceType method is:
%
% ColorspaceType GetImageColorspaceType(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  ColorspaceType
    colorspace;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colorspace=image->colorspace;
  /* bilevel/grayscale pixel content downgrades the reported colorspace */
  type=IdentifyImageType(image,exception);
  if ((type == BilevelType) || (type == GrayscaleType) ||
      (type == GrayscaleAlphaType))
    colorspace=GRAYColorspace;
  return(colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ s R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sRGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the sRGBTransformImage method is:
%
% MagickBooleanType sRGBTransformImage(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* The next three helpers convert a wide-gamut triplet to linear RGB by
   going through CIE XYZ as the connecting space. */
static inline void ConvertAdobe98ToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertAdobe98ToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static inline void ConvertDisplayP3ToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertDisplayP3ToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static inline void ConvertProPhotoToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertProPhotoToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* CMY is the normalized complement of RGB: each channel is
   (QuantumRange - value) scaled into [0,1]. */
static inline void ConvertRGBToCMY(const double red,const double green,
  const double blue,double *cyan,double *magenta,double *yellow)
{
  *cyan=QuantumScale*(QuantumRange-red);
  *magenta=QuantumScale*(QuantumRange-green);
  *yellow=QuantumScale*(QuantumRange-blue);
}
static void ConvertRGBToAdobe98(const double red,const double green,
const double blue,double *r,double *g,double *b)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToAdobe98(X,Y,Z,r,g,b);
}
static void ConvertRGBToDisplayP3(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  /*
    Convert an sRGB triplet to Display P3, pivoting through CIE XYZ.
  */
  double
    cie_x,
    cie_y,
    cie_z;

  ConvertRGBToXYZ(red,green,blue,&cie_x,&cie_y,&cie_z);
  ConvertXYZToDisplayP3(cie_x,cie_y,cie_z,r,g,b);
}
static void ConvertRGBToProPhoto(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  /*
    Convert an sRGB triplet to ProPhoto (ROMM), pivoting through CIE XYZ.
  */
  double
    cie_x,
    cie_y,
    cie_z;

  ConvertRGBToXYZ(red,green,blue,&cie_x,&cie_y,&cie_z);
  ConvertXYZToProPhoto(cie_x,cie_y,cie_z,r,g,b);
}
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  /*
    Map CIE XYZ to LMS cone responses; the coefficients are the CAT02
    chromatic-adaptation matrix.  Each row of the matrix sums to 1.0, so
    the reference white (1,1,1) maps to (1,1,1).
  */
  *L=(0.7328*x)+(0.4296*y)-(0.1624*z);
  *M=((-0.7036)*x)+(1.6975*y)+(0.0061*z);
  *S=(0.0030*x)+(0.0136*y)+(0.9834*z);
}
static void ConvertRGBToLMS(const double red,const double green,
  const double blue,double *L,double *M,double *S)
{
  /*
    Convert an sRGB triplet to LMS cone space, pivoting through CIE XYZ.
  */
  double
    cie_x,
    cie_y,
    cie_z;

  ConvertRGBToXYZ(red,green,blue,&cie_x,&cie_y,&cie_z);
  ConvertXYZToLMS(cie_x,cie_y,cie_z,L,M,S);
}
static void ConvertRGBToLuv(const double red,const double green,
  const double blue,double *L,double *u,double *v)
{
  /*
    Convert an sRGB triplet to CIE L*u*v*, pivoting through CIE XYZ.
  */
  double
    cie_x,
    cie_y,
    cie_z;

  ConvertRGBToXYZ(red,green,blue,&cie_x,&cie_y,&cie_z);
  ConvertXYZToLuv(cie_x,cie_y,cie_z,L,u,v);
}
static void ConvertRGBToxyY(const double red,const double green,
  const double blue,double *low_x,double *low_y,double *cap_Y)
{
  /*
    Convert an sRGB triplet to CIE xyY: chromaticity coordinates are the
    X and Y shares of X+Y+Z (PerceptibleReciprocal guards division by a
    near-zero sum), and the luminance channel Y passes through unchanged.
  */
  double
    norm,
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  norm=PerceptibleReciprocal(X+Y+Z);
  *low_x=norm*X;
  *low_y=norm*Y;
  *cap_Y=Y;
}
static void inline ConvertXYZToJzazbz(const double X,const double Y,
const double Z,const double white_luminance,double *Jz,double *az,double *bz)
{
#define Jzazbz_b 1.15 /* https://observablehq.com/@jrus/jzazbz */
#define Jzazbz_g 0.66
#define Jzazbz_c1 (3424.0/4096.0)
#define Jzazbz_c2 (2413.0/128.0)
#define Jzazbz_c3 (2392.0/128.0)
#define Jzazbz_n (2610.0/16384.0)
#define Jzazbz_p (1.7*2523.0/32.0)
#define Jzazbz_d (-0.56)
#define Jzazbz_d0 (1.6295499532821566e-11)
double
gamma,
Iz,
L,
Lp,
M,
Mp,
S,
Sp,
Xp,
Yp,
Zp;
Xp=(Jzazbz_b*X-Z*(Jzazbz_b-1));
Yp=(Jzazbz_g*Y-X*(Jzazbz_g-1));
Zp=Z;
L=0.41478972*Xp+0.579999*Yp+0.0146480*Zp;
M=(-0.2015100)*Xp+1.120649*Yp+0.0531008*Zp;
S=(-0.0166008)*Xp+0.264800*Yp+0.6684799*Zp;
gamma=pow(L/white_luminance,Jzazbz_n);
Lp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
gamma=pow(M/white_luminance,Jzazbz_n);
Mp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
gamma=pow(S/white_luminance,Jzazbz_n);
Sp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
Iz=0.5*Lp+0.5*Mp;
*az=3.52400*Lp-4.066708*Mp+0.542708*Sp+0.5;
*bz=0.199076*Lp+1.096799*Mp-1.295875*Sp+0.5;
*Jz=((Jzazbz_d+1.0)*Iz)/(Jzazbz_d*Iz+1.0)-Jzazbz_d0;
}
static void inline ConvertJzazbzToXYZ(const double Jz,const double az,
  const double bz,const double white_luminance,double *X,double *Y,double *Z)
{
/*
  Convert Jzazbz back to CIE XYZ; inverse of ConvertXYZToJzazbz and uses
  the Jzazbz_* constants defined there.
*/
  double
    azz,
    bzz,
    gamma,
    Iz,
    L,
    Lp,
    M,
    Mp,
    S,
    Sp,
    Xp,
    Yp,
    Zp;
  gamma=Jz+Jzazbz_d0;
  Iz=gamma/(Jzazbz_d-Jzazbz_d*gamma+1.0);
  /* undo the +0.5 offset applied by the forward transform */
  azz=az-0.5;
  bzz=bz-0.5;
  Lp=Iz+0.138605043271539*azz+0.0580473161561189*bzz;
  Mp=Iz-0.138605043271539*azz-0.0580473161561189*bzz;
  Sp=Iz-0.0960192420263189*azz-0.811891896056039*bzz;
  /* invert the PQ-like transfer curve per channel */
  gamma=pow(Lp,1.0/Jzazbz_p);
  L=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
    Jzazbz_n);
  gamma=pow(Mp,1.0/Jzazbz_p);
  M=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
    Jzazbz_n);
  gamma=pow(Sp,1.0/Jzazbz_p);
  S=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
    Jzazbz_n);
  Xp=1.92422643578761*L-1.00479231259537*M+0.037651404030618*S;
  Yp=0.350316762094999*L+0.726481193931655*M-0.065384422948085*S;
  Zp=(-0.0909828109828476)*L-0.312728290523074*M+1.52276656130526*S;
  *X=(Xp+(Jzazbz_b-1.0)*Zp)/Jzazbz_b;
  /* intentionally uses the just-computed *X: inverts Yp=g*Y-X*(g-1) */
  *Y=(Yp+(Jzazbz_g-1.0)**X)/Jzazbz_g;
  *Z=Zp;
}
static void ConvertRGBToJzazbz(const double red,const double green,
  const double blue,const double white_luminance,double *Jz,double *az,
  double *bz)
{
  /*
    Convert an sRGB triplet to Jzazbz, pivoting through CIE XYZ.  Fixed:
    the green and blue arguments were transposed in the ConvertRGBToXYZ
    call, corrupting the transform (every sibling converter passes the
    channels in red, green, blue order).
  */
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToJzazbz(X,Y,Z,white_luminance,Jz,az,bz);
}
static void ConvertJzazbzToRGB(const double Jz,const double az,
  const double bz,const double white_luminance,double *red,double *green,
  double *blue)
{
  /*
    Convert a Jzazbz triplet to sRGB, pivoting through CIE XYZ.  Fixed:
    the green and blue output pointers were transposed in the
    ConvertXYZToRGB call, swapping the green and blue channels of the
    result (and breaking round-tripping with ConvertRGBToJzazbz).
  */
  double
    X,
    Y,
    Z;

  ConvertJzazbzToXYZ(Jz,az,bz,white_luminance,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static void ConvertRGBToYDbDr(const double red,const double green,
  const double blue,double *Y,double *Db,double *Dr)
{
  /*
    Convert RGB to YDbDr (SECAM).  Y is luma; Db and Dr are chroma
    differences offset by 0.5 so their nominal range is [0..1].
  */
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
  *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}
static void ConvertRGBToYIQ(const double red,const double green,
  const double blue,double *Y,double *I,double *Q)
{
  /*
    Convert RGB to YIQ (NTSC).  Y is luma; I and Q, normally in
    [-0.5..0.5], are offset by 0.5 into the nominal range [0..1].
  */
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
  *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}
static void ConvertRGBToYPbPr(const double red,const double green,
  const double blue,double *Y,double *Pb,double *Pr)
{
  /*
    Convert RGB to YPbPr.  Y is luma; Pb and Pr, normally in
    [-0.5..0.5], are offset by 0.5 into the nominal range [0..1].
  */
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
  *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}
static void ConvertRGBToYCbCr(const double red,const double green,
  const double blue,double *Y,double *Cb,double *Cr)
{
  /*
    YCbCr uses the same normalized transform as YPbPr; delegate directly.
  */
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}
static void ConvertRGBToYUV(const double red,const double green,
  const double blue,double *Y,double *U,double *V)
{
  /*
    Convert RGB to YUV.  Y is luma; U and V, normally centered on zero,
    are offset by 0.5 into the nominal range [0..1].
  */
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
  *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}
static MagickBooleanType sRGBTransformImage(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define sRGBTransformImageTag "RGBTransform/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PrimaryInfo
    primary_info;
  ssize_t
    i;
  ssize_t
    y;
  TransformPacket
    *x_map,
    *y_map,
    *z_map;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(colorspace != sRGBColorspace);
  assert(colorspace != TransparentColorspace);
  assert(colorspace != UndefinedColorspace);
  status=MagickTrue;
  progress=0;
  /*
    Colorspaces with dedicated converters are handled inside this switch
    and return directly; any colorspace not special-cased falls through
    to the generic 3x3 lookup-table transform that follows the switch.
  */
  switch (colorspace)
  {
    case CMYKColorspace:
    {
      PixelInfo
        zero;
      /*
        Convert RGB to CMYK colorspace.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      GetPixelInfo(image,&zero);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;
        PixelInfo
          pixel;
        ssize_t
          x;
        Quantum
          *magick_restrict q;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixel=zero;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          GetPixelInfoPixel(image,q,&pixel);
          ConvertRGBToCMYK(&pixel);
          SetPixelViaPixelInfo(image,&pixel,q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->type=image->alpha_trait == UndefinedPixelTrait ?
        ColorSeparationType : ColorSeparationAlphaType;
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LinearGRAYColorspace:
    {
      /*
        Transform image from sRGB to GRAY.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;
        ssize_t
          x;
        Quantum
          *magick_restrict q;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            gray;
          /* weighted luma; the result is linearized via DecodePixelGamma */
          gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
            0.072186*GetPixelBlue(image,q);
          SetPixelGray(image,ClampToQuantum(DecodePixelGamma(gray)),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      image->type=GrayscaleType;
      return(status);
    }
    case GRAYColorspace:
    {
      /*
        Transform image from sRGB to GRAY.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;
        ssize_t
          x;
        Quantum
          *magick_restrict q;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            gray;
          /* weighted luma; unlike LinearGRAY, no gamma decode here */
          gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
            0.072186*GetPixelBlue(image,q);
          SetPixelGray(image,ClampToQuantum(gray),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      image->type=GrayscaleType;
      return(status);
    }
    case CMYColorspace:
    case Adobe98Colorspace:
    case DisplayP3Colorspace:
    case HCLColorspace:
    case HCLpColorspace:
    case HSBColorspace:
    case HSIColorspace:
    case HSLColorspace:
    case HSVColorspace:
    case HWBColorspace:
    case JzazbzColorspace:
    case LabColorspace:
    case LCHColorspace:
    case LCHabColorspace:
    case LCHuvColorspace:
    case LMSColorspace:
    case LuvColorspace:
    case ProPhotoColorspace:
    case xyYColorspace:
    case XYZColorspace:
    case YCbCrColorspace:
    case YDbDrColorspace:
    case YIQColorspace:
    case YPbPrColorspace:
    case YUVColorspace:
    {
      const char
        *value;
      double
        white_luminance;
      /*
        Transform image from sRGB to target colorspace.
      */
      white_luminance=10000.0;
      value=GetImageProperty(image,"white-luminance",exception);
      if (value != (const char *) NULL)
        white_luminance=StringToDouble(value,(char **) NULL);
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;
        ssize_t
          x;
        Quantum
          *magick_restrict q;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            blue,
            green,
            red,
            X,
            Y,
            Z;
          red=(double) GetPixelRed(image,q);
          green=(double) GetPixelGreen(image,q);
          blue=(double) GetPixelBlue(image,q);
          /* X/Y/Z hold the three target-colorspace channels, whatever
             the target's channel names actually are */
          switch (colorspace)
          {
            case Adobe98Colorspace:
            {
              ConvertRGBToAdobe98(red,green,blue,&X,&Y,&Z);
              break;
            }
            case CMYColorspace:
            {
              ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case DisplayP3Colorspace:
            {
              ConvertRGBToDisplayP3(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLColorspace:
            {
              ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLpColorspace:
            {
              ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSBColorspace:
            {
              ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSIColorspace:
            {
              ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSLColorspace:
            {
              ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSVColorspace:
            {
              ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HWBColorspace:
            {
              ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case JzazbzColorspace:
            {
              ConvertRGBToJzazbz(red,green,blue,white_luminance,&X,&Y,&Z);
              break;
            }
            case LabColorspace:
            {
              ConvertRGBToLab(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LCHColorspace:
            case LCHabColorspace:
            {
              ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LCHuvColorspace:
            {
              ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LMSColorspace:
            {
              ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LuvColorspace:
            {
              ConvertRGBToLuv(red,green,blue,&X,&Y,&Z);
              break;
            }
            case ProPhotoColorspace:
            {
              ConvertRGBToProPhoto(red,green,blue,&X,&Y,&Z);
              break;
            }
            case xyYColorspace:
            {
              ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case XYZColorspace:
            {
              ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YCbCrColorspace:
            {
              ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YDbDrColorspace:
            {
              ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YIQColorspace:
            {
              ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YPbPrColorspace:
            {
              ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YUVColorspace:
            {
              ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
              break;
            }
            default:
            {
              X=QuantumScale*red;
              Y=QuantumScale*green;
              Z=QuantumScale*blue;
              break;
            }
          }
          SetPixelRed(image,ClampToQuantum(QuantumRange*X),q);
          SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q);
          SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LogColorspace:
    {
#define DisplayGamma (1.0/1.7)
#define FilmGamma 0.6
#define ReferenceBlack 95.0
#define ReferenceWhite 685.0
      const char
        *value;
      double
        black,
        density,
        film_gamma,
        gamma,
        reference_black,
        reference_white;
      Quantum
        *logmap;
      /*
        Transform RGB to Log colorspace.  Image properties may override
        the gamma, film-gamma and reference black/white defaults.
      */
      density=DisplayGamma;
      gamma=DisplayGamma;
      value=GetImageProperty(image,"gamma",exception);
      if (value != (const char *) NULL)
        gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
      film_gamma=FilmGamma;
      value=GetImageProperty(image,"film-gamma",exception);
      if (value != (const char *) NULL)
        film_gamma=StringToDouble(value,(char **) NULL);
      reference_black=ReferenceBlack;
      value=GetImageProperty(image,"reference-black",exception);
      if (value != (const char *) NULL)
        reference_black=StringToDouble(value,(char **) NULL);
      reference_white=ReferenceWhite;
      value=GetImageProperty(image,"reference-white",exception);
      if (value != (const char *) NULL)
        reference_white=StringToDouble(value,(char **) NULL);
      logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
        sizeof(*logmap));
      if (logmap == (Quantum *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
        film_gamma);
      /* precompute the log transfer curve for every map entry */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
        logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+
          log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/
          film_gamma))/1024.0));
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;
        ssize_t
          x;
        Quantum
          *magick_restrict q;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=(ssize_t) image->columns; x != 0; x--)
        {
          double
            blue,
            green,
            red;
          red=(double) DecodePixelGamma((MagickRealType)
            GetPixelRed(image,q));
          green=(double) DecodePixelGamma((MagickRealType)
            GetPixelGreen(image,q));
          blue=(double) DecodePixelGamma((MagickRealType)
            GetPixelBlue(image,q));
          SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q);
          SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))],
            q);
          SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      logmap=(Quantum *) RelinquishMagickMemory(logmap);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case RGBColorspace:
    case scRGBColorspace:
    {
      /*
        Transform image from sRGB to linear RGB.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;
        ssize_t
          x;
        Quantum
          *magick_restrict q;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            blue,
            green,
            red;
          red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q));
          green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q));
          blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q));
          SetPixelRed(image,ClampToQuantum(red),q);
          SetPixelGreen(image,ClampToQuantum(green),q);
          SetPixelBlue(image,ClampToQuantum(blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    default:
      break;
  }
  /*
    Allocate the tables.
  */
  x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*x_map));
  y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*y_map));
  z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*z_map));
  if ((x_map == (TransformPacket *) NULL) ||
      (y_map == (TransformPacket *) NULL) ||
      (z_map == (TransformPacket *) NULL))
    {
      /* release whichever allocations succeeded before throwing */
      if (x_map != (TransformPacket *) NULL)
        x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
      if (y_map != (TransformPacket *) NULL)
        y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
      if (z_map != (TransformPacket *) NULL)
        z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(&primary_info,0,sizeof(primary_info));
  switch (colorspace)
  {
    case OHTAColorspace:
    {
      /*
        Initialize OHTA tables:
          I1 = 0.33333*R+0.33334*G+0.33333*B
          I2 = 0.50000*R+0.00000*G-0.50000*B
          I3 =-0.25000*R+0.50000*G-0.25000*B
        I and Q, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.33333*(double) i);
        x_map[i].y=(MagickRealType) (0.50000*(double) i);
        x_map[i].z=(MagickRealType) (-0.25000*(double) i);
        y_map[i].x=(MagickRealType) (0.33334*(double) i);
        y_map[i].y=(MagickRealType) (0.00000*(double) i);
        y_map[i].z=(MagickRealType) (0.50000*(double) i);
        z_map[i].x=(MagickRealType) (0.33333*(double) i);
        z_map[i].y=(MagickRealType) (-0.50000*(double) i);
        z_map[i].z=(MagickRealType) (-0.25000*(double) i);
      }
      break;
    }
    case Rec601YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.601):
          Y = 0.2988390*R+0.5868110*G+0.1143500*B
          Cb= -0.1687367*R-0.3312640*G+0.5000000*B
          Cr= 0.5000000*R-0.4186880*G-0.0813120*B
        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.298839*(double) i);
        x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].x=(MagickRealType) (0.586811*(double) i);
        y_map[i].y=(MagickRealType) (-0.331264*(double) i);
        y_map[i].z=(MagickRealType) (-0.418688*(double) i);
        z_map[i].x=(MagickRealType) (0.114350*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        z_map[i].z=(MagickRealType) (-0.081312*(double) i);
      }
      break;
    }
    case Rec709YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.709):
          Y = 0.212656*R+0.715158*G+0.072186*B
          Cb= -0.114572*R-0.385428*G+0.500000*B
          Cr= 0.500000*R-0.454153*G-0.045847*B
        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.212656*(double) i);
        x_map[i].y=(MagickRealType) (-0.114572*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].x=(MagickRealType) (0.715158*(double) i);
        y_map[i].y=(MagickRealType) (-0.385428*(double) i);
        y_map[i].z=(MagickRealType) (-0.454153*(double) i);
        z_map[i].x=(MagickRealType) (0.072186*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        z_map[i].z=(MagickRealType) (-0.045847*(double) i);
      }
      break;
    }
    case YCCColorspace:
    {
      /*
        Initialize YCC tables:
          Y = 0.298839*R+0.586811*G+0.114350*B
          C1= -0.298839*R-0.586811*G+0.88600*B
          C2= 0.70100*R-0.586811*G-0.114350*B
        YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
      */
      primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
      primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
      /* linear segment near black, then the scaled-gamma segment */
      for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
      {
        x_map[i].x=0.005382*i;
        x_map[i].y=(-0.003296)*i;
        x_map[i].z=0.009410*i;
        y_map[i].x=0.010566*i;
        y_map[i].y=(-0.006471)*i;
        y_map[i].z=(-0.007880)*i;
        z_map[i].x=0.002052*i;
        z_map[i].y=0.009768*i;
        z_map[i].z=(-0.001530)*i;
      }
      for ( ; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=0.298839*(1.099*i-0.099);
        x_map[i].y=(-0.298839)*(1.099*i-0.099);
        x_map[i].z=0.70100*(1.099*i-0.099);
        y_map[i].x=0.586811*(1.099*i-0.099);
        y_map[i].y=(-0.586811)*(1.099*i-0.099);
        y_map[i].z=(-0.586811)*(1.099*i-0.099);
        z_map[i].x=0.114350*(1.099*i-0.099);
        z_map[i].y=0.88600*(1.099*i-0.099);
        z_map[i].z=(-0.114350)*(1.099*i-0.099);
      }
      break;
    }
    default:
    {
      /*
        Linear conversion tables.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (1.0*(double) i);
        x_map[i].y=(MagickRealType) 0.0;
        x_map[i].z=(MagickRealType) 0.0;
        y_map[i].x=(MagickRealType) 0.0;
        y_map[i].y=(MagickRealType) (1.0*(double) i);
        y_map[i].z=(MagickRealType) 0.0;
        z_map[i].x=(MagickRealType) 0.0;
        z_map[i].y=(MagickRealType) 0.0;
        z_map[i].z=(MagickRealType) (1.0*(double) i);
      }
      break;
    }
  }
  /*
    Convert from sRGB.
  */
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Convert DirectClass image.
      */
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;
        PixelInfo
          pixel;
        Quantum
          *magick_restrict q;
        ssize_t
          x;
        unsigned int
          blue,
          green,
          red;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* only the red/green/blue fields of pixel are used below */
          red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelRed(image,q)));
          green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelGreen(image,q)));
          blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelBlue(image,q)));
          pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
            primary_info.x;
          pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
            primary_info.y;
          pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
            primary_info.z;
          SetPixelRed(image,ScaleMapToQuantum(pixel.red),q);
          SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q);
          SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,sRGBTransformImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case PseudoClass:
    {
      unsigned int
        blue,
        green,
        red;
      /*
        Convert PseudoClass image.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        PixelInfo
          pixel;
        red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
        green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
        blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
        pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
        pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
        pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
        image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
        image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
        image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
      }
      (void) SyncImage(image,exception);
      break;
    }
  }
  /*
    Relinquish resources.
  */
  z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
  y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
  x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
  if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
    return(MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    saved_type;
  MagickBooleanType
    sync_status;

  /*
    Tag the image with the new colorspace and reset the colorspace-derived
    metadata (gamma, rendering intent, chromaticity) accordingly; no pixel
    data is transformed here.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already tagged; nothing to do */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  saved_type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.000;  /* linear gray carries no sRGB gamma */
      saved_type=GrayscaleType;
    }
  else if ((IsRGBColorspace(colorspace) != MagickFalse) ||
      (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
    image->gamma=1.000;  /* linear spaces: unity gamma, no chromaticity */
  else
    {
      /* sRGB-like space: perceptual intent plus D65 primaries/white point */
      image->rendering_intent=PerceptualIntent;
      image->chromaticity.red_primary.x=0.6400;
      image->chromaticity.red_primary.y=0.3300;
      image->chromaticity.red_primary.z=0.0300;
      image->chromaticity.green_primary.x=0.3000;
      image->chromaticity.green_primary.y=0.6000;
      image->chromaticity.green_primary.z=0.1000;
      image->chromaticity.blue_primary.x=0.1500;
      image->chromaticity.blue_primary.y=0.0600;
      image->chromaticity.blue_primary.z=0.7900;
      image->chromaticity.white_point.x=0.3127;
      image->chromaticity.white_point.y=0.3290;
      image->chromaticity.white_point.z=0.3583;
    }
  sync_status=SyncImagePixelCache(image,exception);
  image->type=saved_type;
  return(sync_status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageGray() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities and changes the type of the image to
% bi-level or grayscale.
%
% The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *property;
  ImageType
    gray_type;

  /*
    If every pixel has equal red, green, and blue intensities, retag the
    image as grayscale (bi-level or grayscale type); otherwise return
    MagickFalse and leave the image untouched.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsImageGray(image) != MagickFalse)
    return(MagickTrue);  /* already gray */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /* honor an explicit opt-out of automatic grayscale detection */
  property=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(property) != MagickFalse)
    return(MagickFalse);
  gray_type=IdentifyImageGray(image,exception);
  if (gray_type == UndefinedType)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=gray_type;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMonochrome() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange and changes the type of the image to bi-level.
%
% The format of the SetImageMonochrome method is:
%
% MagickBooleanType SetImageMonochrome(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  const char
    *property;

  /*
    If every pixel is pure black or pure white, retag the image as
    bi-level grayscale; otherwise return MagickFalse and leave the image
    untouched.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);  /* already bi-level */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /* honor an explicit opt-out of automatic grayscale detection */
  property=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(property) != MagickFalse)
    return(MagickFalse);
  if (IdentifyImageMonochrome(image,exception) == MagickFalse)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace, changing the
% image data to reflect the new colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Same colorspace requested: just (re)tag the image.
  */
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    The embedded color profiles no longer describe the pixels once they are
    transformed, so drop them.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Target is sRGB: a single conversion from the source colorspace suffices.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));
  /*
    Otherwise pivot through sRGB, then convert to the target colorspace.
  */
  status=MagickTrue;
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformsRGBImage(image,exception);
  if (status == MagickFalse)
    return(status);
  return(sRGBTransformImage(image,colorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m s R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformsRGBImage() converts the reference image from an alternate
% colorspace to sRGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values
% to be [0..QuantumRange].
%
% The format of the TransformsRGBImage method is:
%
% MagickBooleanType TransformsRGBImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,double *red,double *green,double *blue)
{
  /*
    CMY is the subtractive complement of RGB: each channel is one minus the
    ink coverage, scaled to the quantum range.
  */
  *blue=QuantumRange*(1.0-yellow);
  *green=QuantumRange*(1.0-magenta);
  *red=QuantumRange*(1.0-cyan);
}
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  double
    x,
    y,
    z;

  /*
    Fixed 3x3 linear map from LMS cone responses back to CIE XYZ.
  */
  x=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
  *X=x;
  *Y=y;
  *Z=z;
}
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,double *red,double *green,double *blue)
{
  double
    x,
    y,
    z;

  /*
    Pivot through CIE XYZ on the way from LMS to RGB.
  */
  ConvertLMSToXYZ(L,M,S,&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,double *red,double *green,double *blue)
{
  double
    cie_x,
    cie_y,
    cie_z;

  /*
    Undo the [0,1] channel normalization of L*u*v* (L scaled by 100, u and v
    shifted/scaled back to their native ranges), then pivot through XYZ.
  */
  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&cie_x,&cie_y,&cie_z);
  ConvertXYZToRGB(cie_x,cie_y,cie_z,red,green,blue);
}
static inline ssize_t RoundToYCC(const double value)
{
  /*
    Round to the nearest integer, clamped to the valid YCC map index
    range [0,1388].
  */
  if (value >= 1388.0)
    return(1388);
  if (value <= 0.0)
    return(0);
  return((ssize_t) (value+0.5));
}
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,double *red,double *green,double *blue)
{
  double
    cie_x,
    cie_y,
    cie_z;

  /*
    Undo the [0,1] channel normalization of L*a*b* (L scaled by 100, a and b
    re-centered around zero and scaled by 255), then pivot through XYZ.
  */
  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&cie_x,&cie_y,&cie_z);
  ConvertXYZToRGB(cie_x,cie_y,cie_z,red,green,blue);
}
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,double *red,double *green,double *blue)
{
  double
    reciprocal,
    X,
    Y,
    Z;

  /*
    Recover XYZ from chromaticity (x,y) plus luminance Y; the perceptible
    reciprocal guards against a vanishing y denominator.
  */
  reciprocal=PerceptibleReciprocal(low_y);
  Y=cap_Y;
  X=reciprocal*cap_Y*low_x;
  Z=reciprocal*cap_Y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  double *red,double *green,double *blue)
{
  double
    pb,
    pr;

  /*
    Pb and Pr arrive biased by 0.5; remove the bias, then apply the inverse
    luma/chroma matrix and scale to the quantum range.
  */
  pb=Pb-0.5;
  pr=Pr-0.5;
  *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*pb+
    1.4019995886561440468*pr);
  *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*pb-
    0.71413649331646789076*pr);
  *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*pb+
    2.1453384174593273e-06*pr);
}
/*
  YCbCr shares the YPbPr inverse matrix here: forward the call unchanged.
*/
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,double *red,double *green,double *blue)
{
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  double *red,double *green,double *blue)
{
  double
    i,
    q;

  /*
    I and Q arrive biased by 0.5; remove the bias, then apply the inverse
    YIQ matrix and scale to the quantum range.
  */
  i=I-0.5;
  q=Q-0.5;
  *red=QuantumRange*(Y+0.9562957197589482261*i+0.6210244164652610754*q);
  *green=QuantumRange*(Y-0.2721220993185104464*i-0.6473805968256950427*q);
  *blue=QuantumRange*(Y-1.1069890167364901945*i+1.7046149983646481374*q);
}
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  double *red,double *green,double *blue)
{
  double
    db,
    dr;

  /*
    Db and Dr arrive biased by 0.5; remove the bias, then apply the inverse
    YDbDr matrix and scale to the quantum range.
  */
  db=Db-0.5;
  dr=Dr-0.5;
  *red=QuantumRange*(Y+9.2303716147657e-05*db-0.52591263066186533*dr);
  *green=QuantumRange*(Y-0.12913289889050927*db+0.26789932820759876*dr);
  *blue=QuantumRange*(Y+0.66467905997895482*db-7.9202543533108e-05*dr);
}
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  double *red,double *green,double *blue)
{
  double
    u,
    v;

  /*
    U and V arrive biased by 0.5; remove the bias, then apply the inverse
    YUV matrix and scale to the quantum range.
  */
  u=U-0.5;
  v=V-0.5;
  *red=QuantumRange*(Y-3.945707070708279e-05*u+1.1398279671717170825*v);
  *green=QuantumRange*(Y-0.3946101641414141437*u-0.5805003156565656797*v);
  *blue=QuantumRange*(Y+2.0319996843434342537*u-4.813762626262513e-04*v);
}
static MagickBooleanType TransformsRGBImage(Image *image,
ExceptionInfo *exception)
{
#define TransformsRGBImageTag "Transform/Image"
static const float
YCCMap[1389] =
{
0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000f
};
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
progress=0;
switch (image->colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Transform image from CMYK to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertCMYKToRGB(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
{
/*
Transform linear GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
gray=EncodePixelGamma(gray);
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
{
/*
        Transform nonlinear GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case Adobe98Colorspace:
case CMYColorspace:
case DisplayP3Colorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case JzazbzColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case ProPhotoColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
const char
*value;
double
white_luminance;
/*
Transform image from source colorspace to sRGB.
*/
white_luminance=10000.0;
value=GetImageProperty(image,"white-luminance",exception);
if (value != (const char *) NULL)
white_luminance=StringToDouble(value,(char **) NULL);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
X=QuantumScale*GetPixelRed(image,q);
Y=QuantumScale*GetPixelGreen(image,q);
Z=QuantumScale*GetPixelBlue(image,q);
switch (image->colorspace)
{
case Adobe98Colorspace:
{
ConvertAdobe98ToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case CMYColorspace:
{
ConvertCMYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case DisplayP3Colorspace:
{
ConvertDisplayP3ToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLColorspace:
{
ConvertHCLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSBColorspace:
{
ConvertHSBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSIColorspace:
{
ConvertHSIToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSLColorspace:
{
ConvertHSLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSVColorspace:
{
ConvertHSVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HWBColorspace:
{
ConvertHWBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case JzazbzColorspace:
{
ConvertJzazbzToRGB(X,Y,Z,white_luminance,&red,&green,&blue);
break;
}
case LabColorspace:
{
ConvertLabToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LMSColorspace:
{
ConvertLMSToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LuvColorspace:
{
ConvertLuvToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case ProPhotoColorspace:
{
ConvertProPhotoToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case xyYColorspace:
{
ConvertxyYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case XYZColorspace:
{
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YCbCrColorspace:
{
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YDbDrColorspace:
{
ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YIQColorspace:
{
ConvertYIQToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YPbPrColorspace:
{
ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YUVColorspace:
{
ConvertYUVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
default:
{
red=QuantumRange*X;
green=QuantumRange*Y;
blue=QuantumRange*Z;
break;
}
}
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to sRGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
film_gamma);
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++)
logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/
film_gamma)-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=QuantumRange;
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))];
green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))];
blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))];
SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
red)),q);
SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
green)),q);
SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
blue)),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (image->colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I and Q, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*(double) i;
y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap);
z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap);
x_map[i].y=0.99999975910502514331*(double) i;
y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap);
z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap);
x_map[i].z=1.00000124040004623180*(double) i;
y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap);
z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*i);
y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*i);
y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*i);
y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap));
z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.3584000*(double) i);
y_map[i].x=(MagickRealType) 0.0000000;
z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].y=(MagickRealType) (1.3584000*(double) i);
y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].z=(MagickRealType) (1.3584000*(double) i);
y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].z=(MagickRealType) 0.0000000;
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert to sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(image,q));
green=ScaleQuantumToMap(GetPixelGreen(image,q));
blue=ScaleQuantumToMap(GetPixelBlue(image,q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
SetPixelRed(image,ClampToQuantum(pixel.red),q);
SetPixelGreen(image,ClampToQuantum(pixel.green),q);
SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransformsRGBImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
size_t
blue,
green,
red;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
image->colormap[i].red=(double) ClampToQuantum(pixel.red);
image->colormap[i].green=(double) ClampToQuantum(pixel.green);
image->colormap[i].blue=(double) ClampToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
|
libperf_thread.c | /**
* Copyright (C) NVIDIA 2021. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <ucs/debug/log.h>
#include <ucs/arch/bitops.h>
#include <ucs/sys/module.h>
#include <ucs/sys/string.h>
#include <tools/perf/lib/libperf_int.h>
#include <string.h>
#include <unistd.h>
#if _OPENMP
# include <omp.h>
/* Body executed by each worker thread: binds the thread to its device
 * allocator, optionally runs warmup iterations, synchronizes with the other
 * threads, runs the measured test, and stores the result in the thread
 * context.
 *
 * arg: pointer to this thread's ucx_perf_thread_context_t.
 * Returns UCS_OK on success, otherwise the first failing status. */
static ucs_status_t ucx_perf_thread_run_test(void* arg)
{
    ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg; /* a single thread context */
    ucx_perf_result_t* result = &tctx->result;
    ucx_perf_context_t* perf = &tctx->perf;
    ucx_perf_params_t* params = &perf->params;
    ucs_status_t status;

    /* new threads need explicit device association */
    status = perf->allocator->init(perf);
    if (status != UCS_OK) {
        goto out;
    }

    if (params->warmup_iter > 0) {
        ucx_perf_set_warmup(perf, params);
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (UCS_OK != status) {
            goto out;
        }
        /* Start a fresh measurement run so warmup traffic is excluded. */
        ucx_perf_test_prepare_new_run(perf, params);
    }

    /* Run test */
#pragma omp barrier  /* all threads start the measured run together */
    status = ucx_perf_funcs[params->api].run(perf);
    ucx_perf_funcs[params->api].barrier(perf);
    if (UCS_OK != status) {
        goto out;
    }
    /* Convert raw counters into the reported result for this thread. */
    ucx_perf_calc_result(perf, result);

out:
    return status;
}
/* Combine per-thread results into one report: bandwidth and message rate
 * are summed across threads, latency is averaged.  Moment-average and
 * percentile fields are zeroed because they are undefined for the
 * aggregate (last-iteration values are not captured per thread). */
static void ucx_perf_thread_report_aggregated_results(ucx_perf_context_t *perf)
{
    ucx_perf_thread_context_t *thread_ctxs = perf->ucp.tctx; /* all the thread contexts on perf */
    unsigned num_threads                   = perf->params.thread_count;
    double latency_sum                     = 0.0;
    ucx_perf_result_t agg_result;
    unsigned idx;

    /* Scalar counters are taken from the first thread's result. */
    agg_result.iters        = thread_ctxs[0].result.iters;
    agg_result.bytes        = thread_ctxs[0].result.bytes;
    agg_result.elapsed_time = thread_ctxs[0].result.elapsed_time;

    agg_result.bandwidth.total_average = 0.0;
    agg_result.bandwidth.percentile    = 0.0; /* Undefined since used only for latency calculations */
    agg_result.latency.total_average   = 0.0;
    agg_result.msgrate.total_average   = 0.0;
    agg_result.msgrate.percentile      = 0.0; /* Undefined since used only for latency calculations */

    /* when running with multiple threads, the moment average value is
     * undefined since we don't capture the values of the last iteration */
    agg_result.msgrate.moment_average   = 0.0;
    agg_result.bandwidth.moment_average = 0.0;
    agg_result.latency.moment_average   = 0.0;
    agg_result.latency.percentile       = 0.0;

    /* Sum BW and message rate over all threads; accumulate latency for the
     * averaging step below. */
    for (idx = 0; idx < num_threads; idx++) {
        agg_result.bandwidth.total_average += thread_ctxs[idx].result.bandwidth.total_average;
        agg_result.msgrate.total_average   += thread_ctxs[idx].result.msgrate.total_average;
        latency_sum                        += thread_ctxs[idx].result.latency.total_average;
    }
    agg_result.latency.total_average = latency_sum / num_threads;

    rte_call(perf, report, &agg_result, perf->params.report_arg, "", 1, 1);
}
/* Spawn params.thread_count OpenMP threads, run the perf test on each one,
 * and report the aggregated results.
 *
 * perf   - test context holding the per-thread contexts in perf->ucp.tctx.
 * result - unused here; aggregated results are reported via rte_call() by
 *          ucx_perf_thread_report_aggregated_results().
 *
 * Returns UCS_OK if every thread succeeded, otherwise the status of the
 * last failing thread.
 *
 * Fix: the previous version calloc'd a `statuses` array that was never
 * written or read (per-thread statuses live in tctx[ti].status), and its
 * allocation failure produced a spurious UCS_ERR_NO_MEMORY; the dead
 * allocation has been removed. */
ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                   ucx_perf_result_t* result)
{
    ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
    int ti, thread_count = perf->params.thread_count;
    ucs_status_t status;

    omp_set_num_threads(thread_count);

    /* Each thread records its own status in its context. */
#pragma omp parallel private(ti)
    {
        ti = omp_get_thread_num();
        tctx[ti].status = ucx_perf_thread_run_test((void*)&tctx[ti]);
    }

    /* Collect per-thread statuses; keep the last failure, if any. */
    status = UCS_OK;
    for (ti = 0; ti < thread_count; ti++) {
        if (UCS_OK != tctx[ti].status) {
            ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
                      ucs_status_string(tctx[ti].status));
            status = tctx[ti].status;
        }
    }

    ucx_perf_thread_report_aggregated_results(perf);
    return status;
}
#else
/* Fallback used when the tool is built without OpenMP: multi-threaded runs
 * cannot be supported, so the request is rejected outright. */
ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                   ucx_perf_result_t* result)
{
    ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
    return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
|
dotProduct.h | #pragma once
#include <array>
#include <vector>
#include <algorithm>
#include <omp.h>
#include "_cuda.h"
#include "ceilDiv.h"
#include "sum.h"
using namespace std;
// Finds sum of element-by-element product of 2 vectors (arrays).
template <class T>
T dotProduct(T *x, T *y, int N) {
  // Walk both arrays with pointers, accumulating the pairwise products.
  T acc = T();
  const T *px = x, *py = y;
  for (const T *end = x + N; px != end; ++px, ++py)
    acc += (*px) * (*py);
  return acc;
}
// Convenience overload for std::array; forwards to the raw-pointer form.
template <class T, size_t N>
T dotProduct(std::array<T, N>& x, std::array<T, N>& y) {
  const int count = (int) x.size();
  return dotProduct(x.data(), y.data(), count);
}
// Convenience overload for std::vector; forwards to the raw-pointer form.
template <class T>
T dotProduct(std::vector<T>& x, std::vector<T>& y) {
  const int count = (int) x.size();
  return dotProduct(x.data(), y.data(), count);
}
// OpenMP-parallel dot product: same result as dotProduct(), with the loop
// split across threads and the accumulator combined via a (+) reduction.
template <class T>
T dotProductOmp(T *x, T *y, int N) {
  T total = T();
  #pragma omp parallel for reduction (+:total)
  for (int idx = 0; idx < N; ++idx) {
    total += x[idx] * y[idx];
  }
  return total;
}
// Convenience overload for std::array; forwards to the raw-pointer form.
template <class T, size_t N>
T dotProductOmp(std::array<T, N>& x, std::array<T, N>& y) {
  const int count = (int) x.size();
  return dotProductOmp(x.data(), y.data(), count);
}
// Convenience overload for std::vector; forwards to the raw-pointer form.
template <class T>
T dotProductOmp(std::vector<T>& x, std::vector<T>& y) {
  const int count = (int) x.size();
  return dotProductOmp(x.data(), y.data(), count);
}
// Device-side strided accumulation: starting at index i and advancing by
// DI each step, sums x[j] * y[j] for j < N.  Callers give each thread a
// distinct starting offset (see dotProductKernel) so threads cover
// disjoint index sets.
template <class T>
__device__ T dotProductKernelLoop(T *x, T *y, int N, int i, int DI) {
  T a = T();
  for (; i<N; i+=DI)
    a += x[i] * y[i];
  return a;
}
// CUDA kernel: each thread accumulates a strided partial dot product into
// shared memory, the block reduces the shared buffer (sumKernelReduce),
// and thread 0 writes the block's partial sum to a[b].  The per-block
// partials are reduced on the host (see dotProductCuda).
template <class T>
__global__ void dotProductKernel(T *a, T *x, T *y, int N) {
  // NOTE(review): assumes DEFINE binds t=thread idx, b=block idx,
  // B=block dim, G=grid dim (consistent with the B*b+t / G*B usage
  // below) -- confirm against the project's _cuda.h.
  DEFINE(t, b, B, G);
  __shared__ T cache[_THREADS];
  cache[t] = dotProductKernelLoop(x, y, N, B*b+t, G*B);
  sumKernelReduce(cache, B, t);
  if (t == 0) a[b] = cache[0];
}
// Host wrapper: computes the dot product of x and y (length N) on the GPU.
// Copies the inputs to the device, launches dotProductKernel to produce one
// partial sum per block, copies the partials back, and reduces them on the
// host with sum().
//
// Fix: the host-side partial-sum buffer `aPartial` was malloc'd but never
// freed, leaking blocks*sizeof(T) bytes per call; it is now released.
template <class T>
T dotProductCuda(T *x, T *y, int N) {
  int threads = _THREADS;
  // NOTE(review): max() forces at least 1024 blocks even for tiny N; if
  // the intent was to cap the grid size this should be min() -- confirm.
  int blocks = max(ceilDiv(N, threads), 1024);
  size_t X1 = N * sizeof(T);
  size_t A1 = blocks * sizeof(T);

  T *aPartial = (T*) malloc(A1);
  T *xD, *yD, *aPartialD;
  TRY( cudaMalloc(&xD, X1) );
  TRY( cudaMalloc(&yD, X1) );
  TRY( cudaMalloc(&aPartialD, A1) );
  TRY( cudaMemcpy(xD, x, X1, cudaMemcpyHostToDevice) );
  TRY( cudaMemcpy(yD, y, X1, cudaMemcpyHostToDevice) );

  dotProductKernel<<<blocks, threads>>>(aPartialD, xD, yD, N);
  TRY( cudaMemcpy(aPartial, aPartialD, A1, cudaMemcpyDeviceToHost) );

  TRY( cudaFree(yD) );
  TRY( cudaFree(xD) );
  TRY( cudaFree(aPartialD) );

  // Final host-side reduction of the per-block partial sums.
  T a = sum(aPartial, blocks);
  free(aPartial);  // was leaked previously
  return a;
}
// Convenience overload for std::array; forwards to the raw-pointer form.
template <class T, size_t N>
T dotProductCuda(std::array<T, N>& x, std::array<T, N>& y) {
  const int count = (int) x.size();
  return dotProductCuda(x.data(), y.data(), count);
}
// Convenience overload for std::vector; forwards to the raw-pointer form.
template <class T>
T dotProductCuda(std::vector<T>& x, std::vector<T>& y) {
  const int count = (int) x.size();
  return dotProductCuda(x.data(), y.data(), count);
}
|
gimplify.c | /* Tree lowering pass. This pass converts the GENERIC functions-as-trees
tree representation into the GIMPLE form.
Copyright (C) 2002-2017 Free Software Foundation, Inc.
Major work done by Sebastian Pop <s.pop@laposte.net>,
Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "gimple-predict.h"
#include "tree-pass.h" /* FIXME: only for PROP_gimple_any */
#include "ssa.h"
#include "cgraph.h"
#include "tree-pretty-print.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "calls.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "tree-cfg.h"
#include "tree-ssa.h"
#include "omp-general.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "cilk.h"
#include "gomp-constants.h"
#include "tree-dump.h"
#include "gimple-walk.h"
#include "langhooks-def.h" /* FIXME: for lhd_set_decl_assembler_name */
#include "builtins.h"
#include "asan.h"
#include "dbgcnt.h"
/* Hash set of poisoned variables in a bind expr. */
static hash_set<tree> *asan_poisoned_variables = NULL;
/* Per-variable data-sharing flags used while gimplifying OMP constructs.
   Values are distinct bits and are OR'ed together (see the composite
   GOVD_DATA_SHARE_CLASS mask below).  */
enum gimplify_omp_var_data
{
  GOVD_SEEN = 1,
  GOVD_EXPLICIT = 2,
  GOVD_SHARED = 4,
  GOVD_PRIVATE = 8,
  GOVD_FIRSTPRIVATE = 16,
  GOVD_LASTPRIVATE = 32,
  GOVD_REDUCTION = 64,
  GOVD_LOCAL = 128,
  GOVD_MAP = 256,
  GOVD_DEBUG_PRIVATE = 512,
  GOVD_PRIVATE_OUTER_REF = 1024,
  GOVD_LINEAR = 2048,
  GOVD_ALIGNED = 4096,

  /* Flag for GOVD_MAP: don't copy back.  */
  GOVD_MAP_TO_ONLY = 8192,

  /* Flag for GOVD_LINEAR or GOVD_LASTPRIVATE: no outer reference.  */
  GOVD_LINEAR_LASTPRIVATE_NO_OUTER = 16384,

  GOVD_MAP_0LEN_ARRAY = 32768,

  /* Flag for GOVD_MAP, if it is always, to or always, tofrom mapping.  */
  GOVD_MAP_ALWAYS_TO = 65536,

  /* Flag for shared vars that are or might be stored to in the region.  */
  GOVD_WRITTEN = 131072,

  /* Flag for GOVD_MAP, if it is a forced mapping.  */
  GOVD_MAP_FORCE = 262144,

  /* Mask of the flags that denote a data-sharing class.  */
  GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE
			   | GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LINEAR
			   | GOVD_LOCAL)
};
/* Kind of OpenMP/OpenACC region being gimplified.  The values are bit
   patterns: combined variants OR extra bits into the base kind (e.g.
   ORT_COMBINED_PARALLEL == ORT_PARALLEL | 0x01), and OpenACC regions set
   the ORT_ACC bit on top of the corresponding OpenMP kind.  */
enum omp_region_type
{
  ORT_WORKSHARE = 0x00,
  ORT_SIMD = 0x01,

  ORT_PARALLEL = 0x02,
  ORT_COMBINED_PARALLEL = 0x03,

  ORT_TASK = 0x04,
  ORT_UNTIED_TASK = 0x05,

  ORT_TEAMS = 0x08,
  ORT_COMBINED_TEAMS = 0x09,

  /* Data region.  */
  ORT_TARGET_DATA = 0x10,

  /* Data region with offloading.  */
  ORT_TARGET = 0x20,
  ORT_COMBINED_TARGET = 0x21,

  /* OpenACC variants.  */
  ORT_ACC = 0x40,  /* A generic OpenACC region.  */
  ORT_ACC_DATA = ORT_ACC | ORT_TARGET_DATA,  /* Data construct.  */
  ORT_ACC_PARALLEL = ORT_ACC | ORT_TARGET,  /* Parallel construct */
  ORT_ACC_KERNELS = ORT_ACC | ORT_TARGET | 0x80,  /* Kernels construct.  */
  ORT_ACC_HOST_DATA = ORT_ACC | ORT_TARGET_DATA | 0x80,  /* Host data.  */

  /* Dummy OpenMP region, used to disable expansion of
     DECL_VALUE_EXPRs in taskloop pre body.  */
  ORT_NONE = 0x100
};
/* Gimplify hashtable helper. */
struct gimplify_hasher : free_ptr_hash <elt_t>
{
  /* Entries are elt_t records (presumably hashed/compared on their VAL
     tree, matching the lookups in lookup_tmp_var -- confirm against the
     hash/equal definitions).  */
  static inline hashval_t hash (const elt_t *);
  static inline bool equal (const elt_t *, const elt_t *);
};
struct gimplify_ctx
{
  /* Enclosing gimplify context; also links free-list entries in the
     context pool (see ctx_alloc/ctx_free).  */
  struct gimplify_ctx *prev_context;

  /* Stack of enclosing GIMPLE_BINDs (see gimple_push_bind_expr).  */
  vec<gbind *> bind_expr_stack;
  /* Chain of temporaries created in this context; handed to declare_vars
     or record_vars by pop_gimplify_context.  */
  tree temps;
  /* Cleanups collected inside a conditional, flushed when the outermost
     conditional is popped (see gimple_pop_condition).  */
  gimple_seq conditional_cleanups;
  tree exit_label;
  tree return_temp;

  vec<tree> case_labels;
  hash_set<tree> *live_switch_vars;
  /* The formal temporary table.  Should this be persistent?  */
  hash_table<gimplify_hasher> *temp_htab;

  /* COND_EXPR nesting depth (see gimple_push_condition).  */
  int conditions;
  /* Whether temporaries may be created as SSA names
     (see internal_get_tmp_var).  */
  unsigned into_ssa : 1;
  unsigned allow_rhs_cond_expr : 1;
  unsigned in_cleanup_point_expr : 1;
  unsigned keep_stack : 1;
  unsigned save_stack : 1;
  unsigned in_switch_expr : 1;
};
struct gimplify_omp_ctx
{
  /* Enclosing OMP context, if this region nests inside another.  */
  struct gimplify_omp_ctx *outer_context;
  /* Per-variable data, keyed by DECL via splay_tree_compare_decl_uid.  */
  splay_tree variables;
  hash_set<tree> *privatized_types;
  /* Iteration variables in an OMP_FOR.  */
  vec<tree> loop_iter_var;
  /* Source location of the construct, for diagnostics.  */
  location_t location;
  /* Default data-sharing for unlisted variables (see new_omp_context).  */
  enum omp_clause_default_kind default_kind;
  enum omp_region_type region_type;
  bool combined_loop;
  bool distribute;
  bool target_map_scalars_firstprivate;
  bool target_map_pointers_as_0len_arrays;
  bool target_firstprivatize_array_bases;
};
static struct gimplify_ctx *gimplify_ctxp;
static struct gimplify_omp_ctx *gimplify_omp_ctxp;
/* Forward declaration. */
static enum gimplify_status gimplify_compound_expr (tree *, gimple_seq *, bool);
static hash_map<tree, tree> *oacc_declare_returns;
static enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *,
bool (*) (tree), fallback_t, bool);
/* Shorter alias name for the above function for use in gimplify.c
only. */
static inline void
gimplify_seq_add_stmt (gimple_seq *seq_p, gimple *gs)
{
  /* Append GS to *SEQ_P without scanning its operands; during
     gimplification the def/use vectors are not yet constructed.  */
  gimple_seq_add_stmt_without_update (seq_p, gs);
}
/* Append sequence SRC to the end of sequence *DST_P. If *DST_P is
NULL, a new sequence is allocated. This function is
similar to gimple_seq_add_seq, but does not scan the operands.
During gimplification, we need to manipulate statement sequences
before the def/use vectors have been constructed. */
static void
gimplify_seq_add_seq (gimple_seq *dst_p, gimple_seq src)
{
  gimple_stmt_iterator si;

  /* Nothing to append.  */
  if (src == NULL)
    return;

  /* Splice SRC after the current tail of *DST_P.  */
  si = gsi_last (*dst_p);
  gsi_insert_seq_after_without_update (&si, src, GSI_NEW_STMT);
}
/* Pointer to a list of allocated gimplify_ctx structs to be used for pushing
and popping gimplify contexts. */
static struct gimplify_ctx *ctx_pool = NULL;
/* Return a gimplify context struct from the pool. */
static inline struct gimplify_ctx *
ctx_alloc (void)
{
struct gimplify_ctx * c = ctx_pool;
if (c)
ctx_pool = c->prev_context;
else
c = XNEW (struct gimplify_ctx);
memset (c, '\0', sizeof (*c));
return c;
}
/* Put gimplify context C back into the pool. */
static inline void
ctx_free (struct gimplify_ctx *c)
{
  /* Push C onto the head of the free list; its storage is reused by the
     next ctx_alloc and only released by free_gimplify_stack.  */
  c->prev_context = ctx_pool;
  ctx_pool = c;
}
/* Free allocated ctx stack memory. */
void
free_gimplify_stack (void)
{
struct gimplify_ctx *c;
while ((c = ctx_pool))
{
ctx_pool = c->prev_context;
free (c);
}
}
/* Set up a context for the gimplifier. */
void
push_gimplify_context (bool in_ssa, bool rhs_cond_ok)
{
  /* Take a cleared context, record the caller's gimplification modes, and
     make it the innermost active context.  */
  struct gimplify_ctx *c = ctx_alloc ();

  c->prev_context = gimplify_ctxp;
  c->into_ssa = in_ssa;
  c->allow_rhs_cond_expr = rhs_cond_ok;
  gimplify_ctxp = c;
}
/* Tear down a context for the gimplifier. If BODY is non-null, then
put the temporaries into the outer BIND_EXPR. Otherwise, put them
in the local_decls.
BODY is not a sequence, but the first tuple in a sequence. */
void
pop_gimplify_context (gimple *body)
{
  struct gimplify_ctx *c = gimplify_ctxp;

  /* Every GIMPLE_BIND must have been popped before tearing down.  */
  gcc_assert (c
	      && (!c->bind_expr_stack.exists ()
		  || c->bind_expr_stack.is_empty ()));
  c->bind_expr_stack.release ();

  gimplify_ctxp = c->prev_context;

  /* Hand the temporaries created in this context either to the outer
     BIND_EXPR (BODY) or to the function's local_decls.  */
  if (body)
    declare_vars (c->temps, body, false);
  else
    record_vars (c->temps);

  delete c->temp_htab;
  c->temp_htab = NULL;
  ctx_free (c);
}
/* Push a GIMPLE_BIND tuple onto the stack of bindings. */
static void
gimple_push_bind_expr (gbind *bind_stmt)
{
  /* Reserve a small initial capacity up front to avoid repeated
     reallocation for shallow nestings.  */
  gimplify_ctxp->bind_expr_stack.reserve (8);
  gimplify_ctxp->bind_expr_stack.safe_push (bind_stmt);
}
/* Pop the first element off the stack of bindings. */
static void
gimple_pop_bind_expr (void)
{
  /* Discard the innermost GIMPLE_BIND from the stack.  */
  gimplify_ctxp->bind_expr_stack.pop ();
}
/* Return the first element of the stack of bindings. */
gbind *
gimple_current_bind_expr (void)
{
  /* The innermost binding is the most recently pushed element.  */
  return gimplify_ctxp->bind_expr_stack.last ();
}
/* Return the stack of bindings created during gimplification. */
vec<gbind *>
gimple_bind_expr_stack (void)
{
  /* Expose the current context's stack of enclosing GIMPLE_BINDs.  */
  return gimplify_ctxp->bind_expr_stack;
}
/* Return true iff there is a COND_EXPR between us and the innermost
CLEANUP_POINT_EXPR. This info is used by gimple_push_cleanup. */
static bool
gimple_conditional_context (void)
{
  /* `conditions' counts the COND_EXPR nesting depth; nonzero means we are
     inside a conditional relative to the innermost cleanup point.  */
  return gimplify_ctxp->conditions > 0;
}
/* Note that we've entered a COND_EXPR. */
static void
gimple_push_condition (void)
{
#ifdef ENABLE_GIMPLE_CHECKING
  /* Entering the outermost conditional: no conditional cleanups may be
     pending from a previous conditional.  */
  if (gimplify_ctxp->conditions == 0)
    gcc_assert (gimple_seq_empty_p (gimplify_ctxp->conditional_cleanups));
#endif
  ++(gimplify_ctxp->conditions);
}
/* Note that we've left a COND_EXPR. If we're back at unconditional scope
now, add any conditional cleanups we've seen to the prequeue. */
static void
gimple_pop_condition (gimple_seq *pre_p)
{
  int conds = --(gimplify_ctxp->conditions);

  gcc_assert (conds >= 0);
  if (conds == 0)
    {
      /* Back at unconditional scope: flush the cleanups collected inside
	 the conditional into the prequeue.  */
      gimplify_seq_add_seq (pre_p, gimplify_ctxp->conditional_cleanups);
      gimplify_ctxp->conditional_cleanups = NULL;
    }
}
/* A stable comparison routine for use with splay trees and DECLs. */
static int
splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb)
{
  /* Order DECLs by their unique UID for a deterministic comparison.  */
  const tree decl_a = (tree) xa;
  const tree decl_b = (tree) xb;

  return DECL_UID (decl_a) - DECL_UID (decl_b);
}
/* Create a new omp construct that deals with variable remapping. */
static struct gimplify_omp_ctx *
new_omp_context (enum omp_region_type region_type)
{
  /* Allocate a zeroed context nested inside the currently active one.  */
  struct gimplify_omp_ctx *ctx = XCNEW (struct gimplify_omp_ctx);

  ctx->outer_context = gimplify_omp_ctxp;
  ctx->variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0);
  ctx->privatized_types = new hash_set<tree>;
  ctx->location = input_location;
  ctx->region_type = region_type;
  /* Task regions default to "unspecified" sharing; all others default to
     "shared".  */
  ctx->default_kind = ((region_type & ORT_TASK) != 0
		       ? OMP_CLAUSE_DEFAULT_UNSPECIFIED
		       : OMP_CLAUSE_DEFAULT_SHARED);

  return ctx;
}
/* Destroy an omp construct that deals with variable remapping. */
static void
delete_omp_context (struct gimplify_omp_ctx *c)
{
  /* Release the owned containers before freeing the context itself.  */
  splay_tree_delete (c->variables);
  delete c->privatized_types;
  c->loop_iter_var.release ();
  XDELETE (c);
}
static void omp_add_variable (struct gimplify_omp_ctx *, tree, unsigned int);
static bool omp_notice_variable (struct gimplify_omp_ctx *, tree, bool);
/* Both gimplify the statement T and append it to *SEQ_P. This function
behaves exactly as gimplify_stmt, but you don't have to pass T as a
reference. */
void
gimplify_and_add (tree t, gimple_seq *seq_p)
{
  /* gimplify_stmt takes T by reference; passing the address of this local
     copy spares callers from doing so themselves.  */
  gimplify_stmt (&t, seq_p);
}
/* Gimplify statement T into sequence *SEQ_P, and return the first
tuple in the sequence of generated tuples for this statement.
Return NULL if gimplifying T produced no tuples. */
static gimple *
gimplify_and_return_first (tree t, gimple_seq *seq_p)
{
  /* Remember the sequence's tail before gimplifying so we can locate the
     first newly generated tuple afterwards.  */
  gimple_stmt_iterator last = gsi_last (*seq_p);

  gimplify_and_add (t, seq_p);

  if (!gsi_end_p (last))
    {
      /* The sequence was non-empty: the first new statement follows the
	 old tail.  */
      gsi_next (&last);
      return gsi_stmt (last);
    }
  else
    /* The sequence was empty: everything now in it (possibly nothing) is
       new, so return its head.  */
    return gimple_seq_first_stmt (*seq_p);
}
/* Returns true iff T is a valid RHS for an assignment to an un-renamed
LHS, or for a call argument. */
static bool
is_gimple_mem_rhs (tree t)
{
  /* If we're dealing with a renamable type, either source or dest must be
     a renamed variable.  */
  if (!is_gimple_reg_type (TREE_TYPE (t)))
    return is_gimple_val (t) || is_gimple_lvalue (t);

  return is_gimple_val (t);
}
/* Return true if T is a CALL_EXPR or an expression that can be
assigned to a temporary. Note that this predicate should only be
used during gimplification. See the rationale for this in
gimplify_modify_expr. */
static bool
is_gimple_reg_rhs_or_call (tree t)
{
  /* Bare calls are always acceptable.  */
  if (TREE_CODE (t) == CALL_EXPR)
    return true;

  /* Otherwise require a valid GIMPLE RHS class.  */
  return get_gimple_rhs_class (TREE_CODE (t)) != GIMPLE_INVALID_RHS;
}
/* Return true if T is a valid memory RHS or a CALL_EXPR. Note that
this predicate should only be used during gimplification. See the
rationale for this in gimplify_modify_expr. */
static bool
is_gimple_mem_rhs_or_call (tree t)
{
  /* If we're dealing with a renamable type, either source or dest must be
     a renamed variable.  */
  if (is_gimple_reg_type (TREE_TYPE (t)))
    return is_gimple_val (t);

  /* Otherwise lvalues, clobbers and calls are also acceptable.  */
  return (is_gimple_val (t)
	  || is_gimple_lvalue (t)
	  || TREE_CODE (t) == CALL_EXPR
	  || TREE_CLOBBER_P (t));
}
/* Create a temporary with a name derived from VAL. Subroutine of
lookup_tmp_var; nobody else should call this function. */
static inline tree
create_tmp_from_val (tree val)
{
  /* Drop all qualifiers and address-space information from the value type. */
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (val));
  tree var = create_tmp_var (type, get_name (val));
  /* Mark complex and vector temporaries as GIMPLE registers so they need
     not live in memory.  */
  if (TREE_CODE (TREE_TYPE (var)) == COMPLEX_TYPE
      || TREE_CODE (TREE_TYPE (var)) == VECTOR_TYPE)
    DECL_GIMPLE_REG_P (var) = 1;
  return var;
}
/* Create a temporary to hold the value of VAL. If IS_FORMAL, try to reuse
an existing expression temporary. */
static tree
lookup_tmp_var (tree val, bool is_formal)
{
  tree ret;

  /* If not optimizing, never really reuse a temporary.  local-alloc
     won't allocate any variable that is used in more than one basic
     block, which means it will go into memory, causing much extra
     work in reload and final and poorer code generation, outweighing
     the extra memory allocation here.  */
  if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val))
    ret = create_tmp_from_val (val);
  else
    {
      elt_t elt, *elt_p;
      elt_t **slot;

      /* Look VAL up in the formal-temporary table, creating the table
	 lazily on first use.  */
      elt.val = val;
      if (!gimplify_ctxp->temp_htab)
	gimplify_ctxp->temp_htab = new hash_table<gimplify_hasher> (1000);
      slot = gimplify_ctxp->temp_htab->find_slot (&elt, INSERT);
      if (*slot == NULL)
	{
	  /* First occurrence of this value: create a fresh temporary and
	     remember it for later reuse.  */
	  elt_p = XNEW (elt_t);
	  elt_p->val = val;
	  elt_p->temp = ret = create_tmp_from_val (val);
	  *slot = elt_p;
	}
      else
	{
	  /* Reuse the temporary already associated with this value.  */
	  elt_p = *slot;
	  ret = elt_p->temp;
	}
    }

  return ret;
}
/* Helper for get_formal_tmp_var and get_initialized_tmp_var. */
static tree
internal_get_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p,
		      bool is_formal, bool allow_ssa)
{
  tree t, mod;

  /* Notice that we explicitly allow VAL to be a CALL_EXPR so that we
     can create an INIT_EXPR and convert it into a GIMPLE_CALL below.  */
  gimplify_expr (&val, pre_p, post_p, is_gimple_reg_rhs_or_call,
		 fb_rvalue);

  if (allow_ssa
      && gimplify_ctxp->into_ssa
      && is_gimple_reg_type (TREE_TYPE (val)))
    {
      /* Prefer an anonymous SSA name for the temporary when permitted.  */
      t = make_ssa_name (TYPE_MAIN_VARIANT (TREE_TYPE (val)));
      if (! gimple_in_ssa_p (cfun))
	{
	  /* Not yet in SSA form: carry over VAL's name so dumps stay
	     readable.  */
	  const char *name = get_name (val);
	  if (name)
	    SET_SSA_NAME_VAR_OR_IDENTIFIER (t, create_tmp_var_name (name));
	}
    }
  else
    t = lookup_tmp_var (val, is_formal);

  /* Emit the initialization T = VAL into the prequeue.  */
  mod = build2 (INIT_EXPR, TREE_TYPE (t), t, unshare_expr (val));

  SET_EXPR_LOCATION (mod, EXPR_LOC_OR_LOC (val, input_location));

  /* gimplify_modify_expr might want to reduce this further.  */
  gimplify_and_add (mod, pre_p);
  ggc_free (mod);

  return t;
}
/* Return a formal temporary variable initialized with VAL.  PRE_P is as
   in gimplify_expr.  Only use this function if:

   1) The value of the unfactored expression represented by VAL will not
      change between the initialization and use of the temporary, and
   2) The temporary will not be otherwise modified.

   For instance, #1 means that this is inappropriate for SAVE_EXPR temps,
   and #2 means it is inappropriate for && temps.

   For other cases, use get_initialized_tmp_var instead.  */

tree
get_formal_tmp_var (tree val, gimple_seq *pre_p)
{
  /* Formal temporaries never carry post-side-effects and may become
     SSA names.  */
  return internal_get_tmp_var (val, pre_p, /*post_p=*/NULL,
			       /*is_formal=*/true, /*allow_ssa=*/true);
}
/* Return a temporary variable initialized with VAL.  PRE_P and POST_P
   are as in gimplify_expr.  */

tree
get_initialized_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p,
			 bool allow_ssa)
{
  /* Non-formal temporaries are never reused.  */
  return internal_get_tmp_var (val, pre_p, post_p, /*is_formal=*/false,
			       allow_ssa);
}
/* Declare all the variables in VARS in SCOPE.  If DEBUG_INFO is true,
   generate debug info for them; otherwise don't.  */

void
declare_vars (tree vars, gimple *gs, bool debug_info)
{
  tree last = vars;
  if (last)
    {
      tree temps, block;

      /* GS must be a GIMPLE_BIND; as_a checks this.  */
      gbind *scope = as_a <gbind *> (gs);

      /* Reverse the chain in place.  TEMPS is the new head; LAST still
	 points at the original first element, which is now the tail of
	 the reversed chain.  */
      temps = nreverse (last);

      block = gimple_bind_block (scope);
      gcc_assert (!block || TREE_CODE (block) == BLOCK);
      if (!block || !debug_info)
	{
	  /* No debug info wanted: splice TEMPS in front of the scope's
	     existing variables by chaining them after LAST (the tail).  */
	  DECL_CHAIN (last) = gimple_bind_vars (scope);
	  gimple_bind_set_vars (scope, temps);
	}
      else
	{
	  /* We need to attach the nodes both to the BIND_EXPR and to its
	     associated BLOCK for debugging purposes.  The key point here
	     is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR
	     is a subchain of the BIND_EXPR_VARS of the BIND_EXPR.  */
	  if (BLOCK_VARS (block))
	    BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps);
	  else
	    {
	      gimple_bind_set_vars (scope,
				    chainon (gimple_bind_vars (scope), temps));
	      BLOCK_VARS (block) = temps;
	    }
	}
    }
}
/* For VAR a VAR_DECL of variable size, try to find a constant upper bound
   for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly.  Abort if
   no such upper bound can be obtained.  */

static void
force_constant_size (tree var)
{
  gcc_assert (VAR_P (var));

  /* The only attempt we make is by querying the maximum size of objects
     of the variable's type.  */
  HOST_WIDE_INT max_size = max_int_size_in_bytes (TREE_TYPE (var));
  gcc_assert (max_size >= 0);

  DECL_SIZE_UNIT (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size);
  DECL_SIZE (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT);
}
/* Push the temporary variable TMP into the local declarations of
   function FN.  */

void
gimple_add_tmp_var_fn (struct function *fn, tree tmp)
{
  gcc_assert (!DECL_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));

  /* Later processing assumes that the object size is constant, which
     might not be true at this point.  Force a constant upper bound in
     that case.  */
  if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (tmp)))
    force_constant_size (tmp);

  DECL_CONTEXT (tmp) = fn->decl;
  DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;

  record_vars_into (tmp, fn->decl);
}
/* Push the temporary variable TMP into the current binding.  */

void
gimple_add_tmp_var (tree tmp)
{
  gcc_assert (!DECL_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));

  /* Later processing assumes that the object size is constant, which
     might not be true at this point.  Force a constant upper bound in
     that case.  */
  if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (tmp)))
    force_constant_size (tmp);

  DECL_CONTEXT (tmp) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;

  if (gimplify_ctxp)
    {
      /* Chain the temporary onto the active gimplification context.  */
      DECL_CHAIN (tmp) = gimplify_ctxp->temps;
      gimplify_ctxp->temps = tmp;

      /* Mark temporaries local within the nearest enclosing parallel.  */
      if (gimplify_omp_ctxp)
	{
	  struct gimplify_omp_ctx *octx = gimplify_omp_ctxp;
	  while (octx
		 && (octx->region_type == ORT_WORKSHARE
		     || octx->region_type == ORT_SIMD
		     || octx->region_type == ORT_ACC))
	    octx = octx->outer_context;
	  if (octx)
	    omp_add_variable (octx, tmp, GOVD_LOCAL | GOVD_SEEN);
	}
      return;
    }

  if (cfun)
    {
      record_vars (tmp);
      return;
    }

  /* This case is for nested functions.  We need to expose the locals
     they create.  */
  gimple_seq body_seq = gimple_body (current_function_decl);
  declare_vars (tmp, gimple_seq_first_stmt (body_seq), false);
}
/* This page contains routines to unshare tree nodes, i.e. to duplicate tree
nodes that are referenced more than once in GENERIC functions. This is
necessary because gimplification (translation into GIMPLE) is performed
   by modifying tree nodes in-place, so gimplification of a shared node in a
first context could generate an invalid GIMPLE form in a second context.
This is achieved with a simple mark/copy/unmark algorithm that walks the
GENERIC representation top-down, marks nodes with TREE_VISITED the first
time it encounters them, duplicates them if they already have TREE_VISITED
set, and finally removes the TREE_VISITED marks it has set.
The algorithm works only at the function level, i.e. it generates a GENERIC
representation of a function with no nodes shared within the function when
passed a GENERIC function (except for nodes that are allowed to be shared).
At the global level, it is also necessary to unshare tree nodes that are
referenced in more than one function, for the same aforementioned reason.
This requires some cooperation from the front-end. There are 2 strategies:
1. Manual unsharing. The front-end needs to call unshare_expr on every
expression that might end up being shared across functions.
2. Deep unsharing. This is an extension of regular unsharing. Instead
of calling unshare_expr on expressions that might be shared across
functions, the front-end pre-marks them with TREE_VISITED. This will
ensure that they are unshared on the first reference within functions
when the regular unsharing algorithm runs. The counterpart is that
this algorithm must look deeper than for manual unsharing, which is
specified by LANG_HOOKS_DEEP_UNSHARING.
If there are only few specific cases of node sharing across functions, it is
probably easier for a front-end to unshare the expressions manually. On the
contrary, if the expressions generated at the global level are as widespread
as expressions generated within functions, deep unsharing is very likely the
way to go. */
/* Similar to copy_tree_r but do not copy SAVE_EXPR or TARGET_EXPR nodes.
   These nodes model computations that must be done once.  If we were to
   unshare something like SAVE_EXPR(i++), the gimplification process would
   create wrong code.  However, if DATA is non-null, it must hold a pointer
   set that is used to unshare the subtrees of these nodes.  */

static tree
mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  tree node = *tp;
  enum tree_code code = TREE_CODE (node);

  switch (code)
    {
    case SAVE_EXPR:
    case TARGET_EXPR:
    case BIND_EXPR:
      /* Never copy these nodes themselves; walk into their subtrees
	 only when DATA's pointer set guarantees we do so at most once
	 (hash_set::add returns true when the node was already there).  */
      if (!data || ((hash_set<tree> *) data)->add (node))
	*walk_subtrees = 0;
      break;

    case STATEMENT_LIST:
      /* Cope with the statement expression extension: walk into the
	 list without copying the list node itself.  */
      break;

    case BLOCK:
      /* We can't do anything sensible with a BLOCK used as an
	 expression, but we also can't just die when we see it
	 because of non-expression uses.  So we avert our eyes
	 and cross our fingers.  Silly Java.  */
      *walk_subtrees = 0;
      break;

    default:
      /* Stop at types, decls, constants like copy_tree_r.  */
      if (TREE_CODE_CLASS (code) == tcc_type
	  || TREE_CODE_CLASS (code) == tcc_declaration
	  || TREE_CODE_CLASS (code) == tcc_constant)
	*walk_subtrees = 0;
      else
	/* Leave the bulk of the work to copy_tree_r itself.  */
	copy_tree_r (tp, walk_subtrees, NULL);
      break;
    }

  return NULL_TREE;
}
/* Callback for walk_tree to unshare most of the shared trees rooted at *TP.
   If *TP has been visited already, then *TP is deeply copied by calling
   mostly_copy_tree_r.  DATA is passed to mostly_copy_tree_r unmodified.  */

static tree
copy_if_shared_r (tree *tp, int *walk_subtrees, void *data)
{
  tree node = *tp;
  enum tree_code_class cl = TREE_CODE_CLASS (TREE_CODE (node));

  if (cl == tcc_type || cl == tcc_declaration || cl == tcc_constant)
    {
      /* Skip types, decls, and constants.  But we do want to look at their
	 types and the bounds of types.  Mark them as visited so we properly
	 unmark their subtrees on the unmark pass.  If we've already seen
	 them, don't look down further.  */
      if (TREE_VISITED (node))
	*walk_subtrees = 0;
      else
	TREE_VISITED (node) = 1;
    }
  else if (TREE_VISITED (node))
    {
      /* This node has been visited already: unshare it and don't look
	 any deeper.  */
      walk_tree (tp, mostly_copy_tree_r, data, NULL);
      *walk_subtrees = 0;
    }
  else
    /* First encounter: mark the node and keep looking.  */
    TREE_VISITED (node) = 1;

  return NULL_TREE;
}
/* Unshare most of the shared trees rooted at *TP.  DATA is the pointer
   set (or NULL) forwarded unmodified to the copy_if_shared_r callback.  */

static inline void
copy_if_shared (tree *tp, void *data)
{
  walk_tree (tp, copy_if_shared_r, data, NULL);
}
/* Unshare all the trees in the body of FNDECL, as well as in the bodies of
   any nested functions.  */

static void
unshare_body (tree fndecl)
{
  struct cgraph_node *node = cgraph_node::get (fndecl);

  /* If the language requires deep unsharing, we need a pointer set to make
     sure we don't repeatedly unshare subtrees of unshareable nodes.  */
  hash_set<tree> *visited = NULL;
  if (lang_hooks.deep_unsharing)
    visited = new hash_set<tree>;

  copy_if_shared (&DECL_SAVED_TREE (fndecl), visited);
  copy_if_shared (&DECL_SIZE (DECL_RESULT (fndecl)), visited);
  copy_if_shared (&DECL_SIZE_UNIT (DECL_RESULT (fndecl)), visited);

  delete visited;

  /* Recurse into any nested functions.  */
  if (node)
    for (node = node->nested; node; node = node->next_nested)
      unshare_body (node->decl);
}
/* Callback for walk_tree to unmark the visited trees rooted at *TP.
   Subtrees are walked until the first unvisited node is encountered.  */

static tree
unmark_visited_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  if (!TREE_VISITED (*tp))
    /* Unvisited: the walk need not go any deeper.  */
    *walk_subtrees = 0;
  else
    /* Clear the mark and keep looking.  */
    TREE_VISITED (*tp) = 0;

  return NULL_TREE;
}
/* Clear the TREE_VISITED marks on the trees rooted at *TP.  */

static inline void
unmark_visited (tree *tp)
{
  walk_tree (tp, unmark_visited_r, NULL, NULL);
}
/* Likewise, but mark all trees as not visited.  */

static void
unvisit_body (tree fndecl)
{
  struct cgraph_node *node = cgraph_node::get (fndecl);

  unmark_visited (&DECL_SAVED_TREE (fndecl));
  unmark_visited (&DECL_SIZE (DECL_RESULT (fndecl)));
  unmark_visited (&DECL_SIZE_UNIT (DECL_RESULT (fndecl)));

  /* Recurse into any nested functions.  */
  if (node)
    for (node = node->nested; node; node = node->next_nested)
      unvisit_body (node->decl);
}
/* Unconditionally make an unshared copy of EXPR.  This is used when using
   stored expressions which span multiple functions, such as BINFO_VTABLE,
   as the normal unsharing process can't tell that they're shared.  */

tree
unshare_expr (tree expr)
{
  /* A NULL pointer set makes mostly_copy_tree_r copy unconditionally.  */
  walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
  return expr;
}
/* Worker for unshare_expr_without_location: reset the location of every
   expression node encountered, stopping at non-expression subtrees.  */

static tree
prune_expr_location (tree *tp, int *walk_subtrees, void *)
{
  if (!EXPR_P (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  SET_EXPR_LOCATION (*tp, UNKNOWN_LOCATION);
  return NULL_TREE;
}
/* Similar to unshare_expr but also prune all expression locations
   from EXPR.  */

tree
unshare_expr_without_location (tree expr)
{
  /* Unshare first, then strip locations from the fresh copy only.  */
  walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
  if (EXPR_P (expr))
    walk_tree (&expr, prune_expr_location, NULL, NULL);
  return expr;
}
/* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both
   contain statements and have a value.  Assign its value to a temporary
   and give it void_type_node.  Return the temporary, or NULL_TREE if
   WRAPPER was already void.

   If TEMP is non-null it must be an INIT_EXPR or MODIFY_EXPR whose RHS
   will be replaced by the wrapper's value expression; otherwise a fresh
   "retval" temporary is created.  */

tree
voidify_wrapper_expr (tree wrapper, tree temp)
{
  tree type = TREE_TYPE (wrapper);
  if (type && !VOID_TYPE_P (type))
    {
      tree *p;

      /* Set p to point to the body of the wrapper.  Loop until we find
	 something that isn't a wrapper.  Each wrapper traversed is
	 voidified on the way down.  */
      for (p = &wrapper; p && *p; )
	{
	  switch (TREE_CODE (*p))
	    {
	    case BIND_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      /* For a BIND_EXPR, the body is operand 1.  */
	      p = &BIND_EXPR_BODY (*p);
	      break;

	    case CLEANUP_POINT_EXPR:
	    case TRY_FINALLY_EXPR:
	    case TRY_CATCH_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TREE_OPERAND (*p, 0);
	      break;

	    case STATEMENT_LIST:
	      {
		/* The value of a statement list is its last statement.  */
		tree_stmt_iterator i = tsi_last (*p);
		TREE_SIDE_EFFECTS (*p) = 1;
		TREE_TYPE (*p) = void_type_node;
		p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i);
	      }
	      break;

	    case COMPOUND_EXPR:
	      /* Advance to the last statement.  Set all container types to
		 void.  */
	      for (; TREE_CODE (*p) == COMPOUND_EXPR; p = &TREE_OPERAND (*p, 1))
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		}
	      break;

	    case TRANSACTION_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TRANSACTION_EXPR_BODY (*p);
	      break;

	    default:
	      /* Assume that any tree upon which voidify_wrapper_expr is
		 directly called is a wrapper, and that its body is op0.  */
	      if (p == &wrapper)
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		  p = &TREE_OPERAND (*p, 0);
		  break;
		}
	      goto out;
	    }
	}

    out:
      if (p == NULL || IS_EMPTY_STMT (*p))
	/* No value-producing statement found: nothing to capture.  */
	temp = NULL_TREE;
      else if (temp)
	{
	  /* The wrapper is on the RHS of an assignment that we're pushing
	     down.  */
	  gcc_assert (TREE_CODE (temp) == INIT_EXPR
		      || TREE_CODE (temp) == MODIFY_EXPR);
	  TREE_OPERAND (temp, 1) = *p;
	  *p = temp;
	}
      else
	{
	  /* No assignment supplied: create a "retval" temporary and
	     initialize it with the wrapper's value expression.  */
	  temp = create_tmp_var (type, "retval");
	  *p = build2 (INIT_EXPR, type, temp, *p);
	}

      return temp;
    }

  return NULL_TREE;
}
/* Prepare calls to builtins to SAVE and RESTORE the stack as well as
   a temporary through which they communicate.  */

static void
build_stack_save_restore (gcall **save, gcall **restore)
{
  /* saved_stack = __builtin_stack_save ();  */
  *save = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_SAVE), 0);
  tree saved_ptr = create_tmp_var (ptr_type_node, "saved_stack");
  gimple_call_set_lhs (*save, saved_ptr);

  /* __builtin_stack_restore (saved_stack);  */
  *restore
    = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_RESTORE),
			 1, saved_ptr);
}
/* Generate IFN_ASAN_MARK call that poisons shadow of a for DECL variable.  */

static tree
build_asan_poison_call_expr (tree decl)
{
  /* Do not poison variables that have size equal to zero.  */
  tree unit_size = DECL_SIZE_UNIT (decl);
  if (zerop (unit_size))
    return NULL_TREE;

  /* ASAN_MARK (POISON, &decl, size-in-bytes).  */
  tree flags = build_int_cst (integer_type_node, ASAN_MARK_POISON);
  tree base = build_fold_addr_expr (decl);
  return build_call_expr_internal_loc (UNKNOWN_LOCATION, IFN_ASAN_MARK,
				       void_type_node, 3, flags, base,
				       unit_size);
}
/* Generate IFN_ASAN_MARK call that would poison or unpoison, depending
   on POISON flag, shadow memory of a DECL variable.  The call will be
   put on location identified by IT iterator, where BEFORE flag drives
   position where the stmt will be put.  */

static void
asan_poison_variable (tree decl, bool poison, gimple_stmt_iterator *it,
		      bool before)
{
  /* When within an OMP context, do not emit ASAN_MARK internal fns.  */
  if (gimplify_omp_ctxp)
    return;

  tree unit_size = DECL_SIZE_UNIT (decl);
  tree base = build_fold_addr_expr (decl);

  /* Do not poison variables that have size equal to zero.  */
  if (zerop (unit_size))
    return;

  /* It's necessary to have all stack variables aligned to ASAN granularity
     bytes.  */
  if (DECL_ALIGN_UNIT (decl) <= ASAN_SHADOW_GRANULARITY)
    SET_DECL_ALIGN (decl, BITS_PER_UNIT * ASAN_SHADOW_GRANULARITY);

  HOST_WIDE_INT flags = poison ? ASAN_MARK_POISON : ASAN_MARK_UNPOISON;

  /* Build ASAN_MARK (flags, &decl, size-in-bytes).  */
  gimple *g
    = gimple_build_call_internal (IFN_ASAN_MARK, 3,
				  build_int_cst (integer_type_node, flags),
				  base, unit_size);

  if (before)
    gsi_insert_before (it, g, GSI_NEW_STMT);
  else
    gsi_insert_after (it, g, GSI_NEW_STMT);
}
/* Generate IFN_ASAN_MARK internal call that depending on POISON flag
   either poisons or unpoisons a DECL.  Created statement is appended
   to SEQ_P gimple sequence.  */

static void
asan_poison_variable (tree decl, bool poison, gimple_seq *seq_p)
{
  gimple_stmt_iterator it = gsi_last (*seq_p);

  /* For an empty sequence insert before the end iterator; otherwise
     append after the last statement.  */
  bool before = gsi_end_p (it);
  asan_poison_variable (decl, poison, &it, before);
}
/* Sort pair of VAR_DECLs A and B by DECL_UID. */
static int
sort_by_decl_uid (const void *a, const void *b)
{
const tree *t1 = (const tree *)a;
const tree *t2 = (const tree *)b;
int uid1 = DECL_UID (*t1);
int uid2 = DECL_UID (*t2);
if (uid1 < uid2)
return -1;
else if (uid1 > uid2)
return 1;
else
return 0;
}
/* Generate IFN_ASAN_MARK internal call for all VARIABLES
   depending on POISON flag.  Created statement is appended
   to SEQ_P gimple sequence.  */

static void
asan_poison_variables (hash_set<tree> *variables, bool poison, gimple_seq *seq_p)
{
  unsigned count = variables->elements ();
  if (count == 0)
    return;

  /* Collect the variables and sort by DECL_UID so that the emitted
     sequence is stable from run to run.  */
  auto_vec<tree> sorted_variables (count);
  for (hash_set<tree>::iterator it = variables->begin ();
       it != variables->end (); ++it)
    sorted_variables.safe_push (*it);
  sorted_variables.qsort (sort_by_decl_uid);

  unsigned ix;
  tree var;
  FOR_EACH_VEC_ELT (sorted_variables, ix, var)
    {
      asan_poison_variable (var, poison, seq_p);

      /* Add use_after_scope_memory attribute for the variable in order
	 to prevent re-written into SSA.  */
      if (!lookup_attribute (ASAN_USE_AFTER_SCOPE_ATTRIBUTE,
			     DECL_ATTRIBUTES (var)))
	DECL_ATTRIBUTES (var)
	  = tree_cons (get_identifier (ASAN_USE_AFTER_SCOPE_ATTRIBUTE),
		       integer_one_node,
		       DECL_ATTRIBUTES (var));
    }
}
/* Gimplify a BIND_EXPR.  Just voidify and recurse.  Builds a GIMPLE_BIND,
   gimplifies the body into it, and wraps the body in a TRY_FINALLY with
   stack save/restore, variable clobbers and ASan unpoisoning as needed.  */

static enum gimplify_status
gimplify_bind_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree bind_expr = *expr_p;
  bool old_keep_stack = gimplify_ctxp->keep_stack;
  bool old_save_stack = gimplify_ctxp->save_stack;
  tree t;
  gbind *bind_stmt;
  gimple_seq body, cleanup;
  gcall *stack_save;
  location_t start_locus = 0, end_locus = 0;
  tree ret_clauses = NULL;

  tree temp = voidify_wrapper_expr (bind_expr, NULL);

  /* Mark variables seen in this bind expr.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t))
    {
      if (VAR_P (t))
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;

	  /* Mark variable as local.  */
	  if (ctx && ctx->region_type != ORT_NONE && !DECL_EXTERNAL (t)
	      && (! DECL_SEEN_IN_BIND_EXPR_P (t)
		  || splay_tree_lookup (ctx->variables,
					(splay_tree_key) t) == NULL))
	    {
	      if (ctx->region_type == ORT_SIMD
		  && TREE_ADDRESSABLE (t)
		  && !TREE_STATIC (t))
		omp_add_variable (ctx, t, GOVD_PRIVATE | GOVD_SEEN);
	      else
		omp_add_variable (ctx, t, GOVD_LOCAL | GOVD_SEEN);
	    }

	  DECL_SEEN_IN_BIND_EXPR_P (t) = 1;

	  if (DECL_HARD_REGISTER (t) && !is_global_var (t) && cfun)
	    cfun->has_local_explicit_reg_vars = true;
	}

      /* Preliminarily mark non-addressed complex variables as eligible
	 for promotion to gimple registers.  We'll transform their uses
	 as we find them.  */
      if ((TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
	   || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE)
	  && !TREE_THIS_VOLATILE (t)
	  && (VAR_P (t) && !DECL_HARD_REGISTER (t))
	  && !needs_to_live_in_memory (t))
	DECL_GIMPLE_REG_P (t) = 1;
    }

  bind_stmt = gimple_build_bind (BIND_EXPR_VARS (bind_expr), NULL,
				 BIND_EXPR_BLOCK (bind_expr));
  gimple_push_bind_expr (bind_stmt);

  /* Reset the flags so we can detect save_stack/keep_stack requests made
     while gimplifying this body; restored near the end.  */
  gimplify_ctxp->keep_stack = false;
  gimplify_ctxp->save_stack = false;

  /* Gimplify the body into the GIMPLE_BIND tuple's body.  */
  body = NULL;
  gimplify_stmt (&BIND_EXPR_BODY (bind_expr), &body);
  gimple_bind_set_body (bind_stmt, body);

  /* Source location wise, the cleanup code (stack_restore and clobbers)
     belongs to the end of the block, so propagate what we have.  The
     stack_save operation belongs to the beginning of block, which we can
     infer from the bind_expr directly if the block has no explicit
     assignment.  */
  if (BIND_EXPR_BLOCK (bind_expr))
    {
      end_locus = BLOCK_SOURCE_END_LOCATION (BIND_EXPR_BLOCK (bind_expr));
      start_locus = BLOCK_SOURCE_LOCATION (BIND_EXPR_BLOCK (bind_expr));
    }
  if (start_locus == 0)
    start_locus = EXPR_LOCATION (bind_expr);

  cleanup = NULL;
  stack_save = NULL;

  /* If the code both contains VLAs and calls alloca, then we cannot reclaim
     the stack space allocated to the VLAs.  */
  if (gimplify_ctxp->save_stack && !gimplify_ctxp->keep_stack)
    {
      gcall *stack_restore;

      /* Save stack on entry and restore it on exit.  Add a try_finally
	 block to achieve this.  */
      build_stack_save_restore (&stack_save, &stack_restore);
      gimple_set_location (stack_save, start_locus);
      gimple_set_location (stack_restore, end_locus);

      gimplify_seq_add_stmt (&cleanup, stack_restore);
    }

  /* Add clobbers for all variables that go out of scope.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t))
    {
      if (VAR_P (t)
	  && !is_global_var (t)
	  && DECL_CONTEXT (t) == current_function_decl)
	{
	  if (!DECL_HARD_REGISTER (t)
	      && !TREE_THIS_VOLATILE (t)
	      && !DECL_HAS_VALUE_EXPR_P (t)
	      /* Only care for variables that have to be in memory.  Others
		 will be rewritten into SSA names, hence moved to the
		 top-level.  */
	      && !is_gimple_reg (t)
	      && flag_stack_reuse != SR_NONE)
	    {
	      /* An empty-CONSTRUCTOR assignment with TREE_THIS_VOLATILE set
		 is GIMPLE's representation of a variable clobber.  */
	      tree clobber = build_constructor (TREE_TYPE (t), NULL);
	      gimple *clobber_stmt;
	      TREE_THIS_VOLATILE (clobber) = 1;
	      clobber_stmt = gimple_build_assign (t, clobber);
	      gimple_set_location (clobber_stmt, end_locus);
	      gimplify_seq_add_stmt (&cleanup, clobber_stmt);
	    }

	  /* Collect pending OpenACC "declare" exit clauses for this var.  */
	  if (flag_openacc && oacc_declare_returns != NULL)
	    {
	      tree *c = oacc_declare_returns->get (t);
	      if (c != NULL)
		{
		  if (ret_clauses)
		    OMP_CLAUSE_CHAIN (*c) = ret_clauses;

		  ret_clauses = *c;

		  oacc_declare_returns->remove (t);

		  if (oacc_declare_returns->elements () == 0)
		    {
		      delete oacc_declare_returns;
		      oacc_declare_returns = NULL;
		    }
		}
	    }
	}

      /* Re-poison ASan-tracked variables as they leave scope.  */
      if (asan_poisoned_variables != NULL
	  && asan_poisoned_variables->contains (t))
	{
	  asan_poisoned_variables->remove (t);
	  asan_poison_variable (t, true, &cleanup);
	}

      if (gimplify_ctxp->live_switch_vars != NULL
	  && gimplify_ctxp->live_switch_vars->contains (t))
	gimplify_ctxp->live_switch_vars->remove (t);
    }

  if (ret_clauses)
    {
      /* Emit the accumulated OpenACC declare-exit clauses at the head of
	 the cleanup sequence.  */
      gomp_target *stmt;
      gimple_stmt_iterator si = gsi_start (cleanup);

      stmt = gimple_build_omp_target (NULL, GF_OMP_TARGET_KIND_OACC_DECLARE,
				      ret_clauses);
      gsi_insert_seq_before_without_update (&si, stmt, GSI_NEW_STMT);
    }

  if (cleanup)
    {
      /* Wrap the body in try { body } finally { cleanup }, preceded by
	 the stack_save if one was created.  */
      gtry *gs;
      gimple_seq new_body;

      new_body = NULL;
      gs = gimple_build_try (gimple_bind_body (bind_stmt), cleanup,
			     GIMPLE_TRY_FINALLY);

      if (stack_save)
	gimplify_seq_add_stmt (&new_body, stack_save);
      gimplify_seq_add_stmt (&new_body, gs);
      gimple_bind_set_body (bind_stmt, new_body);
    }

  /* keep_stack propagates all the way up to the outermost BIND_EXPR.  */
  if (!gimplify_ctxp->keep_stack)
    gimplify_ctxp->keep_stack = old_keep_stack;
  gimplify_ctxp->save_stack = old_save_stack;

  gimple_pop_bind_expr ();

  gimplify_seq_add_stmt (pre_p, bind_stmt);

  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }

  *expr_p = NULL_TREE;
  return GS_ALL_DONE;
}
/* Gimplify a RETURN_EXPR.  If the expression to be returned is not a
   GIMPLE value, it is assigned to a new temporary and the statement is
   re-written to return the temporary.

   PRE_P points to the sequence where side effects that must happen before
   STMT should be stored.  */

static enum gimplify_status
gimplify_return_expr (tree stmt, gimple_seq *pre_p)
{
  greturn *ret;
  tree ret_expr = TREE_OPERAND (stmt, 0);
  tree result_decl, result;

  if (ret_expr == error_mark_node)
    return GS_ERROR;

  /* Implicit _Cilk_sync must be inserted right before any return statement
     if there is a _Cilk_spawn in the function.  If the user has provided a
     _Cilk_sync, the optimizer should remove this duplicate one.  */
  if (fn_contains_cilk_spawn_p (cfun))
    {
      tree impl_sync = build0 (CILK_SYNC_STMT, void_type_node);
      gimplify_and_add (impl_sync, pre_p);
    }

  if (!ret_expr
      || TREE_CODE (ret_expr) == RESULT_DECL
      || ret_expr == error_mark_node)
    {
      /* A bare return, or a return of the RESULT_DECL itself, needs no
	 temporary: emit the GIMPLE_RETURN directly.  */
      greturn *ret = gimple_build_return (ret_expr);
      gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
      gimplify_seq_add_stmt (pre_p, ret);
      return GS_ALL_DONE;
    }

  if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
    result_decl = NULL_TREE;
  else
    {
      /* RET_EXPR is an assignment to the RESULT_DECL; dig it out.  */
      result_decl = TREE_OPERAND (ret_expr, 0);

      /* See through a return by reference.  */
      if (TREE_CODE (result_decl) == INDIRECT_REF)
	result_decl = TREE_OPERAND (result_decl, 0);

      gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR
		   || TREE_CODE (ret_expr) == INIT_EXPR)
		  && TREE_CODE (result_decl) == RESULT_DECL);
    }

  /* If aggregate_value_p is true, then we can return the bare RESULT_DECL.
     Recall that aggregate_value_p is FALSE for any aggregate type that is
     returned in registers.  If we're returning values in registers, then
     we don't want to extend the lifetime of the RESULT_DECL, particularly
     across another call.  In addition, for those aggregates for which
     hard_function_value generates a PARALLEL, we'll die during normal
     expansion of structure assignments; there's special code in expand_return
     to handle this case that does not exist in expand_expr.  */
  if (!result_decl)
    result = NULL_TREE;
  else if (aggregate_value_p (result_decl, TREE_TYPE (current_function_decl)))
    {
      if (TREE_CODE (DECL_SIZE (result_decl)) != INTEGER_CST)
	{
	  if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (result_decl)))
	    gimplify_type_sizes (TREE_TYPE (result_decl), pre_p);
	  /* Note that we don't use gimplify_vla_decl because the RESULT_DECL
	     should be effectively allocated by the caller, i.e. all calls to
	     this function must be subject to the Return Slot Optimization.  */
	  gimplify_one_sizepos (&DECL_SIZE (result_decl), pre_p);
	  gimplify_one_sizepos (&DECL_SIZE_UNIT (result_decl), pre_p);
	}
      result = result_decl;
    }
  else if (gimplify_ctxp->return_temp)
    /* Reuse the single per-function return temporary.  */
    result = gimplify_ctxp->return_temp;
  else
    {
      result = create_tmp_reg (TREE_TYPE (result_decl));

      /* ??? With complex control flow (usually involving abnormal edges),
	 we can wind up warning about an uninitialized value for this.  Due
	 to how this variable is constructed and initialized, this is never
	 true.  Give up and never warn.  */
      TREE_NO_WARNING (result) = 1;

      gimplify_ctxp->return_temp = result;
    }

  /* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use.
     Then gimplify the whole thing.  */
  if (result != result_decl)
    TREE_OPERAND (ret_expr, 0) = result;

  gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p);

  ret = gimple_build_return (result);
  gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
  gimplify_seq_add_stmt (pre_p, ret);

  return GS_ALL_DONE;
}
/* Gimplify a variable-length array DECL.  */

static void
gimplify_vla_decl (tree decl, gimple_seq *seq_p)
{
  /* This is a variable-sized decl.  Simplify its size and mark it
     for deferred expansion.  */
  tree t, addr, ptr_type;

  gimplify_one_sizepos (&DECL_SIZE (decl), seq_p);
  gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), seq_p);

  /* Don't mess with a DECL_VALUE_EXPR set by the front-end.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    return;

  /* All occurrences of this decl in final gimplified code will be
     replaced by indirection.  Setting DECL_VALUE_EXPR does two
     things: First, it lets the rest of the gimplifier know what
     replacement to use.  Second, it lets the debug info know
     where to find the value.  */
  ptr_type = build_pointer_type (TREE_TYPE (decl));
  addr = create_tmp_var (ptr_type, get_name (decl));
  DECL_IGNORED_P (addr) = 0;
  t = build_fold_indirect_ref (addr);
  /* The pointer is always non-null, so dereferencing cannot trap.  */
  TREE_THIS_NOTRAP (t) = 1;
  SET_DECL_VALUE_EXPR (decl, t);
  DECL_HAS_VALUE_EXPR_P (decl) = 1;

  /* Allocate the backing storage with __builtin_alloca_with_align and
     assign the result to ADDR.  */
  t = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
  t = build_call_expr (t, 2, DECL_SIZE_UNIT (decl),
		       size_int (DECL_ALIGN (decl)));
  /* The call has been built for a variable-sized object.  */
  CALL_ALLOCA_FOR_VAR_P (t) = 1;
  t = fold_convert (ptr_type, t);
  t = build2 (MODIFY_EXPR, TREE_TYPE (addr), addr, t);

  gimplify_and_add (t, seq_p);
}
/* A helper function to be called via walk_tree.  Mark all labels under *TP
   as being forced.  To be called for DECL_INITIAL of static variables.  */

static tree
force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  tree node = *tp;

  /* Types carry no label references worth walking.  */
  if (TYPE_P (node))
    *walk_subtrees = 0;

  if (TREE_CODE (node) == LABEL_DECL)
    {
      /* The label's address escapes into static data; keep it alive.  */
      FORCED_LABEL (node) = 1;
      cfun->has_forced_label_in_static = 1;
    }

  return NULL_TREE;
}
/* Gimplify a DECL_EXPR node *STMT_P by making any necessary allocation
   and initialization explicit.  */

static enum gimplify_status
gimplify_decl_expr (tree *stmt_p, gimple_seq *seq_p)
{
  tree stmt = *stmt_p;
  tree decl = DECL_EXPR_DECL (stmt);

  *stmt_p = NULL_TREE;

  if (TREE_TYPE (decl) == error_mark_node)
    return GS_ERROR;

  /* Gimplify any size expressions embedded in the decl's type.  */
  if ((TREE_CODE (decl) == TYPE_DECL
       || VAR_P (decl))
      && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
    {
      gimplify_type_sizes (TREE_TYPE (decl), seq_p);
      if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE)
	gimplify_type_sizes (TREE_TYPE (TREE_TYPE (decl)), seq_p);
    }

  /* ??? DECL_ORIGINAL_TYPE is streamed for LTO so it needs to be gimplified
     in case its size expressions contain problematic nodes like CALL_EXPR.  */
  if (TREE_CODE (decl) == TYPE_DECL
      && DECL_ORIGINAL_TYPE (decl)
      && !TYPE_SIZES_GIMPLIFIED (DECL_ORIGINAL_TYPE (decl)))
    {
      gimplify_type_sizes (DECL_ORIGINAL_TYPE (decl), seq_p);
      if (TREE_CODE (DECL_ORIGINAL_TYPE (decl)) == REFERENCE_TYPE)
	gimplify_type_sizes (TREE_TYPE (DECL_ORIGINAL_TYPE (decl)), seq_p);
    }

  if (VAR_P (decl) && !DECL_EXTERNAL (decl))
    {
      tree init = DECL_INITIAL (decl);
      bool is_vla = false;

      /* Treat as a VLA if the size is not a compile-time constant, or if
	 generic stack checking requires large objects to be allocated
	 dynamically.  */
      if (TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST
	  || (!TREE_STATIC (decl)
	      && flag_stack_check == GENERIC_STACK_CHECK
	      && compare_tree_int (DECL_SIZE_UNIT (decl),
				   STACK_CHECK_MAX_VAR_SIZE) > 0))
	{
	  gimplify_vla_decl (decl, seq_p);
	  is_vla = true;
	}

      if (asan_poisoned_variables
	  && !is_vla
	  && TREE_ADDRESSABLE (decl)
	  && !TREE_STATIC (decl)
	  && !DECL_HAS_VALUE_EXPR_P (decl)
	  && dbg_cnt (asan_use_after_scope))
	{
	  /* Track this variable for use-after-scope checking; it is
	     unpoisoned here at its point of declaration.  */
	  asan_poisoned_variables->add (decl);
	  asan_poison_variable (decl, false, seq_p);
	  if (!DECL_ARTIFICIAL (decl) && gimplify_ctxp->live_switch_vars)
	    gimplify_ctxp->live_switch_vars->add (decl);
	}

      /* Some front ends do not explicitly declare all anonymous
	 artificial variables.  We compensate here by declaring the
	 variables, though it would be better if the front ends would
	 explicitly declare them.  */
      if (!DECL_SEEN_IN_BIND_EXPR_P (decl)
	  && DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE)
	gimple_add_tmp_var (decl);

      if (init && init != error_mark_node)
	{
	  if (!TREE_STATIC (decl))
	    {
	      /* Turn the initializer into an INIT_EXPR statement and
		 gimplify it in place of the DECL_EXPR.  */
	      DECL_INITIAL (decl) = NULL_TREE;
	      init = build2 (INIT_EXPR, void_type_node, decl, init);
	      gimplify_and_add (init, seq_p);
	      ggc_free (init);
	    }
	  else
	    /* We must still examine initializers for static variables
	       as they may contain a label address.  */
	    walk_tree (&init, force_labels_r, NULL, NULL);
	}
    }

  return GS_ALL_DONE;
}
/* Gimplify a LOOP_EXPR.  Normally this just involves gimplifying the body
   and replacing the LOOP_EXPR with goto, but if the loop contains an
   EXIT_EXPR, we need to append a label for it to jump to.  */

static enum gimplify_status
gimplify_loop_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree outer_exit_label = gimplify_ctxp->exit_label;
  tree loop_head = create_artificial_label (UNKNOWN_LOCATION);

  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_head));

  /* Gimplify the body with a fresh exit-label slot; an EXIT_EXPR inside
     the body will fill it in.  */
  gimplify_ctxp->exit_label = NULL_TREE;
  gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p);
  gimplify_seq_add_stmt (pre_p, gimple_build_goto (loop_head));

  /* Emit the exit label only if some EXIT_EXPR requested one.  */
  if (gimplify_ctxp->exit_label)
    gimplify_seq_add_stmt (pre_p,
			   gimple_build_label (gimplify_ctxp->exit_label));

  gimplify_ctxp->exit_label = outer_exit_label;

  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* Gimplify a statement list onto a sequence.  These may be created either
   by an enlightened front-end, or by shortcut_cond_expr.  */

static enum gimplify_status
gimplify_statement_list (tree *expr_p, gimple_seq *pre_p)
{
  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  /* Gimplify each statement in turn and unlink it from the list.  */
  for (tree_stmt_iterator i = tsi_start (*expr_p); !tsi_end_p (i); )
    {
      gimplify_stmt (tsi_stmt_ptr (i), pre_p);
      tsi_delink (&i);
    }

  if (temp == NULL_TREE)
    return GS_ALL_DONE;

  /* The list had a value: hand back the temporary holding it.  */
  *expr_p = temp;
  return GS_OK;
}
/* Callback for walk_gimple_seq.  Finds the first "real" statement of a
   switch body, stores it in WI->INFO and returns non-NULL to stop the
   walk; compiler-generated scaffolding is walked through.  */

static tree
warn_switch_unreachable_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
			   struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_TRY:
      /* A compiler-generated cleanup or a user-written try block.
	 If it's empty, don't dive into it--that would result in
	 worse location info.  */
      if (gimple_try_eval (stmt) == NULL)
	{
	  wi->info = stmt;
	  return integer_zero_node;
	}
      /* Fall through.  */
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_TRANSACTION:
      /* Walk the sub-statements.  */
      *handled_ops_p = false;
      break;

    case GIMPLE_CALL:
      if (gimple_call_internal_p (stmt, IFN_ASAN_MARK))
	{
	  /* ASAN_MARK calls are compiler-generated; look past them.  */
	  *handled_ops_p = false;
	  break;
	}
      /* Fall through.  */
    default:
      /* Save the first "real" statement (not a decl/lexical scope/...).  */
      wi->info = stmt;
      return integer_zero_node;
    }

  return NULL_TREE;
}
/* Possibly warn about unreachable statements between switch's controlling
   expression and the first case.  SEQ is the body of a switch expression.  */

static void
maybe_warn_switch_unreachable (gimple_seq seq)
{
  /* Nothing to do if the warning is off, there is no body, or we're
     compiling Fortran, where this warning doesn't play well with
     optimizations.  */
  if (!warn_switch_unreachable || lang_GNU_Fortran () || seq == NULL)
    return;

  struct walk_stmt_info wi;
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (seq, warn_switch_unreachable_r, NULL, &wi);

  /* The walk stored the first "real" statement, if any; a label means
     control can reach it, so there is nothing to report.  */
  gimple *first = (gimple *) wi.info;
  if (first == NULL || gimple_code (first) == GIMPLE_LABEL)
    return;

  if (gimple_code (first) == GIMPLE_GOTO
      && TREE_CODE (gimple_goto_dest (first)) == LABEL_DECL
      && DECL_ARTIFICIAL (gimple_goto_dest (first)))
    /* Don't warn for compiler-generated gotos.  These occur
       in Duff's devices, for example.  */;
  else
    warning_at (gimple_location (first), OPT_Wswitch_unreachable,
		"statement will never be executed");
}
/* A label entry that pairs label and a location.  */
struct label_entry
{
  /* The LABEL_DECL itself.  */
  tree label;
  /* Source location to use when diagnosing a fall-through to LABEL.  */
  location_t loc;
};
/* Find LABEL in vector of label entries VEC.  Returns the matching
   entry, or NULL if LABEL is not present.  */

static struct label_entry *
find_label_entry (const auto_vec<struct label_entry> *vec, tree label)
{
  unsigned int ix;
  struct label_entry *entry;

  /* A linear scan suffices; these vectors are small.  */
  FOR_EACH_VEC_ELT (*vec, ix, entry)
    if (entry->label == label)
      return entry;

  return NULL;
}
/* Return true if LABEL, a LABEL_DECL, represents a case label
in a vector of labels CASES. */
static bool
case_label_p (const vec<tree> *cases, tree label)
{
unsigned int i;
tree l;
FOR_EACH_VEC_ELT (*cases, i, l)
if (CASE_LABEL (l) == label)
return true;
return false;
}
/* Find the last statement in a scope STMT, descending through nested
   GIMPLE_BIND and GIMPLE_TRY containers.  Returns NULL for a NULL or
   empty scope.  */

static gimple *
last_stmt_in_scope (gimple *stmt)
{
  if (!stmt)
    return NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_BIND:
      {
	/* A lexical scope: its last statement is the last statement of
	   its body, recursively.  */
	gbind *bind = as_a <gbind *> (stmt);
	stmt = gimple_seq_last_stmt (gimple_bind_body (bind));
	return last_stmt_in_scope (stmt);
      }

    case GIMPLE_TRY:
      {
	gtry *try_stmt = as_a <gtry *> (stmt);
	stmt = gimple_seq_last_stmt (gimple_try_eval (try_stmt));
	gimple *last_eval = last_stmt_in_scope (stmt);
	/* NOTE(review): the NULL test below is only reached after
	   gimple_stmt_may_fallthru (last_eval) has already been
	   evaluated; presumably that call tolerates NULL or LAST_EVAL
	   can never be NULL here -- confirm, otherwise the ordering of
	   these two conditions should be swapped.  */
	if (gimple_stmt_may_fallthru (last_eval)
	    && (last_eval == NULL
		|| !gimple_call_internal_p (last_eval, IFN_FALLTHROUGH))
	    && gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
	  {
	    /* A try/finally whose body may fall through continues into
	       the cleanup sequence, so the truly last statement is the
	       last one of the cleanup.  */
	    stmt = gimple_seq_last_stmt (gimple_try_cleanup (try_stmt));
	    return last_stmt_in_scope (stmt);
	  }
	else
	  return last_eval;
      }

    default:
      /* Not a container: STMT itself is the last statement.  */
      return stmt;
    }
}
/* Collect interesting labels in LABELS and return the statement preceding
   another case label, or a user-defined label.  Advances *GSI_P; on
   return it points at the stopping label (or the end of the sequence).  */

static gimple *
collect_fallthrough_labels (gimple_stmt_iterator *gsi_p,
			    auto_vec <struct label_entry> *labels)
{
  gimple *prev = NULL;

  do
    {
      if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_BIND
	  || gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_TRY)
	{
	  /* Nested scope.  Only look at the last statement of
	     the innermost scope.  */
	  location_t bind_loc = gimple_location (gsi_stmt (*gsi_p));
	  gimple *last = last_stmt_in_scope (gsi_stmt (*gsi_p));
	  if (last)
	    {
	      prev = last;
	      /* It might be a label without a location.  Use the
		 location of the scope then.  */
	      if (!gimple_has_location (prev))
		gimple_set_location (prev, bind_loc);
	    }
	  gsi_next (gsi_p);
	  continue;
	}

      /* Ifs are tricky.  */
      if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_COND)
	{
	  gcond *cond_stmt = as_a <gcond *> (gsi_stmt (*gsi_p));
	  tree false_lab = gimple_cond_false_label (cond_stmt);
	  location_t if_loc = gimple_location (cond_stmt);

	  /* If we have e.g.
	       if (i > 1) goto <D.2259>; else goto D;
	     we can't do much with the else-branch.  */
	  if (!DECL_ARTIFICIAL (false_lab))
	    break;

	  /* Go on until the false label, then one step back.  */
	  for (; !gsi_end_p (*gsi_p); gsi_next (gsi_p))
	    {
	      gimple *stmt = gsi_stmt (*gsi_p);
	      if (gimple_code (stmt) == GIMPLE_LABEL
		  && gimple_label_label (as_a <glabel *> (stmt)) == false_lab)
		break;
	    }

	  /* Not found?  Oops.  */
	  if (gsi_end_p (*gsi_p))
	    break;

	  /* The false label falls through to whatever follows the if, so
	     record it as a fall-through candidate.  */
	  struct label_entry l = { false_lab, if_loc };
	  labels->safe_push (l);

	  /* Go to the last statement of the then branch.  */
	  gsi_prev (gsi_p);

	  /* if (i != 0) goto <D.1759>; else goto <D.1760>;
	     <D.1759>:
	     <stmt>;
	     goto <D.1761>;
	     <D.1760>:
	   */
	  if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_GOTO
	      && !gimple_has_location (gsi_stmt (*gsi_p)))
	    {
	      /* Look at the statement before, it might be
		 attribute fallthrough, in which case don't warn.  */
	      gsi_prev (gsi_p);
	      bool fallthru_before_dest
		= gimple_call_internal_p (gsi_stmt (*gsi_p), IFN_FALLTHROUGH);
	      gsi_next (gsi_p);
	      tree goto_dest = gimple_goto_dest (gsi_stmt (*gsi_p));
	      if (!fallthru_before_dest)
		{
		  struct label_entry l = { goto_dest, if_loc };
		  labels->safe_push (l);
		}
	    }
	  /* And move back.  */
	  gsi_next (gsi_p);
	}

      /* Remember the last statement.  Skip labels that are of no interest
	 to us.  */
      if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_LABEL)
	{
	  tree label = gimple_label_label (as_a <glabel *> (gsi_stmt (*gsi_p)));
	  if (find_label_entry (labels, label))
	    prev = gsi_stmt (*gsi_p);
	}
      /* Compiler-generated ASAN_MARK calls are invisible for our purposes.  */
      else if (gimple_call_internal_p (gsi_stmt (*gsi_p), IFN_ASAN_MARK))
	;
      else
	prev = gsi_stmt (*gsi_p);
      gsi_next (gsi_p);
    }
  while (!gsi_end_p (*gsi_p)
	 /* Stop if we find a case or a user-defined label.  */
	 && (gimple_code (gsi_stmt (*gsi_p)) != GIMPLE_LABEL
	     || !gimple_has_location (gsi_stmt (*gsi_p))));

  return prev;
}
/* Return true if the switch fallthough warning should occur.  LABEL is
   the label statement that we're falling through to.  GSI_P points just
   past LABEL; it is not advanced (a local copy is used for lookahead).  */

static bool
should_warn_for_implicit_fallthrough (gimple_stmt_iterator *gsi_p, tree label)
{
  gimple_stmt_iterator gsi = *gsi_p;

  /* Don't warn if the label is marked with a "falls through" comment.  */
  if (FALLTHROUGH_LABEL_P (label))
    return false;

  /* Don't warn for non-case labels followed by a statement:
       case 0:
	 foo ();
       label:
	 bar ();
     as these are likely intentional.  */
  if (!case_label_p (&gimplify_ctxp->case_labels, label))
    {
      tree l;
      /* Skip any run of further non-case labels; only warn if the run
	 eventually reaches a case label.  */
      while (!gsi_end_p (gsi)
	     && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
	     && (l = gimple_label_label (as_a <glabel *> (gsi_stmt (gsi))))
	     && !case_label_p (&gimplify_ctxp->case_labels, l))
	gsi_next (&gsi);
      if (gsi_end_p (gsi) || gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
	return false;
    }

  /* Don't warn for terminated branches, i.e. when the subsequent case labels
     immediately breaks.  */
  gsi = *gsi_p;

  /* Skip all immediately following labels.  */
  while (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL)
    gsi_next (&gsi);

  /* { ... something; default:; } */
  if (gsi_end_p (gsi)
      /* { ... something; default: break; } or
	 { ... something; default: goto L; } */
      || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
      /* { ... something; default: return; } */
      || gimple_code (gsi_stmt (gsi)) == GIMPLE_RETURN)
    return false;

  return true;
}
/* Callback for walk_gimple_seq.  Detects implicit fall-through between
   switch case labels and emits -Wimplicit-fallthrough diagnostics.  */

static tree
warn_implicit_fallthrough_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
			     struct walk_stmt_info *)
{
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_TRY:
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_TRANSACTION:
      /* Walk the sub-statements.  */
      *handled_ops_p = false;
      break;

    /* Find a sequence of form:

       GIMPLE_LABEL
       [...]
       <may fallthru stmt>
       GIMPLE_LABEL

       and possibly warn.  */
    case GIMPLE_LABEL:
      {
	/* Found a label.  Skip all immediately following labels.  */
	while (!gsi_end_p (*gsi_p)
	       && gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_LABEL)
	  gsi_next (gsi_p);

	/* There might be no more statements.  */
	if (gsi_end_p (*gsi_p))
	  return integer_zero_node;

	/* Vector of labels that fall through.  */
	auto_vec <struct label_entry> labels;
	gimple *prev = collect_fallthrough_labels (gsi_p, &labels);

	/* There might be no more statements.  */
	if (gsi_end_p (*gsi_p))
	  return integer_zero_node;

	gimple *next = gsi_stmt (*gsi_p);
	tree label;
	/* If what follows is a label, then we may have a fallthrough.  */
	if (gimple_code (next) == GIMPLE_LABEL
	    && gimple_has_location (next)
	    && (label = gimple_label_label (as_a <glabel *> (next)))
	    && prev != NULL)
	  {
	    struct label_entry *l;
	    bool warned_p = false;
	    if (!should_warn_for_implicit_fallthrough (gsi_p, label))
	      /* Quiet.  */;
	    else if (gimple_code (prev) == GIMPLE_LABEL
		     && (label = gimple_label_label (as_a <glabel *> (prev)))
		     && (l = find_label_entry (&labels, label)))
	      /* The previous statement is itself a recorded fall-through
		 label: warn at the location saved for it.  */
	      warned_p = warning_at (l->loc, OPT_Wimplicit_fallthrough_,
				     "this statement may fall through");
	    else if (!gimple_call_internal_p (prev, IFN_FALLTHROUGH)
		     /* Try to be clever and don't warn when the statement
			can't actually fall through.  */
		     && gimple_stmt_may_fallthru (prev)
		     && gimple_has_location (prev))
	      warned_p = warning_at (gimple_location (prev),
				     OPT_Wimplicit_fallthrough_,
				     "this statement may fall through");
	    if (warned_p)
	      inform (gimple_location (next), "here");

	    /* Mark this label as processed so as to prevent multiple
	       warnings in nested switches.  */
	    FALLTHROUGH_LABEL_P (label) = true;

	    /* So that next warn_implicit_fallthrough_r will start looking for
	       a new sequence starting with this label.  */
	    gsi_prev (gsi_p);
	  }
      }
      break;
   default:
      break;
    }
  return NULL_TREE;
}
/* Warn when a switch case falls through.  SEQ is the gimplified body
   of the switch.  */

static void
maybe_warn_implicit_fallthrough (gimple_seq seq)
{
  if (!warn_implicit_fallthrough)
    return;

  /* This warning is meant for C/C++/ObjC/ObjC++ only.  */
  bool c_family_p = lang_GNU_C () || lang_GNU_CXX () || lang_GNU_OBJC ();
  if (!c_family_p)
    return;

  /* Let warn_implicit_fallthrough_r do the actual detection.  */
  struct walk_stmt_info wi;
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (seq, warn_implicit_fallthrough_r, NULL, &wi);
}
/* Callback for walk_gimple_seq.  Removes IFN_FALLTHROUGH marker calls
   and diagnoses those not immediately preceding a case/default label.  */

static tree
expand_FALLTHROUGH_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
		      struct walk_stmt_info *)
{
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_TRY:
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_TRANSACTION:
      /* Walk the sub-statements.  */
      *handled_ops_p = false;
      break;
    case GIMPLE_CALL:
      if (gimple_call_internal_p (stmt, IFN_FALLTHROUGH))
	{
	  /* The marker has served its purpose; drop it from the IL.  */
	  gsi_remove (gsi_p, true);
	  if (gsi_end_p (*gsi_p))
	    return integer_zero_node;

	  bool found = false;
	  location_t loc = gimple_location (stmt);

	  gimple_stmt_iterator gsi2 = *gsi_p;
	  stmt = gsi_stmt (gsi2);
	  if (gimple_code (stmt) == GIMPLE_GOTO && !gimple_has_location (stmt))
	    {
	      /* Go on until the artificial label.  */
	      tree goto_dest = gimple_goto_dest (stmt);
	      for (; !gsi_end_p (gsi2); gsi_next (&gsi2))
		{
		  if (gimple_code (gsi_stmt (gsi2)) == GIMPLE_LABEL
		      && gimple_label_label (as_a <glabel *> (gsi_stmt (gsi2)))
			 == goto_dest)
		    break;
		}

	      /* Not found?  Stop.  */
	      if (gsi_end_p (gsi2))
		break;

	      /* Look one past it.  */
	      gsi_next (&gsi2);
	    }

	  /* We're looking for a case label or default label here.  */
	  while (!gsi_end_p (gsi2))
	    {
	      stmt = gsi_stmt (gsi2);
	      if (gimple_code (stmt) == GIMPLE_LABEL)
		{
		  tree label = gimple_label_label (as_a <glabel *> (stmt));
		  if (gimple_has_location (stmt) && DECL_ARTIFICIAL (label))
		    {
		      found = true;
		      break;
		    }
		}
	      /* Compiler-generated ASAN_MARK calls may sit between the
		 marker and the label; skip over them.  */
	      else if (gimple_call_internal_p (stmt, IFN_ASAN_MARK))
		;
	      else
		/* Something other is not expected.  */
		break;
	      gsi_next (&gsi2);
	    }
	  if (!found)
	    warning_at (loc, 0, "attribute %<fallthrough%> not preceding "
			"a case label or default label");
	}
      break;
    default:
      break;
    }
  return NULL_TREE;
}
/* Expand all FALLTHROUGH () calls in SEQ.  */

static void
expand_FALLTHROUGH (gimple_seq *seq_p)
{
  /* expand_FALLTHROUGH_r removes the marker calls (and diagnoses any
     misplaced ones), so walk with the sequence-modifying variant.  */
  struct walk_stmt_info info;
  memset (&info, 0, sizeof (info));
  walk_gimple_seq_mod (seq_p, expand_FALLTHROUGH_r, NULL, &info);
}
/* Gimplify a SWITCH_EXPR, and collect the vector of labels it can
   branch to.  */

static enum gimplify_status
gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree switch_expr = *expr_p;
  gimple_seq switch_body_seq = NULL;
  enum gimplify_status ret;
  tree index_type = TREE_TYPE (switch_expr);
  if (index_type == NULL_TREE)
    index_type = TREE_TYPE (SWITCH_COND (switch_expr));

  /* The controlling expression must be reduced to a gimple value first.  */
  ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL, is_gimple_val,
		       fb_rvalue);
  if (ret == GS_ERROR || ret == GS_UNHANDLED)
    return ret;

  if (SWITCH_BODY (switch_expr))
    {
      vec<tree> labels;
      vec<tree> saved_labels;
      hash_set<tree> *saved_live_switch_vars = NULL;
      tree default_case = NULL_TREE;
      gswitch *switch_stmt;

      /* If someone can be bothered to fill in the labels, they can
	 be bothered to null out the body too.  */
      gcc_assert (!SWITCH_LABELS (switch_expr));

      /* Save old labels, get new ones from body, then restore the old
	 labels.  Save all the things from the switch body to append after.  */
      saved_labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels.create (8);

      /* Do not create live_switch_vars if SWITCH_BODY is not a BIND_EXPR.  */
      saved_live_switch_vars = gimplify_ctxp->live_switch_vars;
      tree_code body_type = TREE_CODE (SWITCH_BODY (switch_expr));
      if (body_type == BIND_EXPR || body_type == STATEMENT_LIST)
	gimplify_ctxp->live_switch_vars = new hash_set<tree> (4);
      else
	gimplify_ctxp->live_switch_vars = NULL;

      /* Gimplify the body with in_switch_expr set so nested constructs
	 know they are inside a switch.  */
      bool old_in_switch_expr = gimplify_ctxp->in_switch_expr;
      gimplify_ctxp->in_switch_expr = true;

      gimplify_stmt (&SWITCH_BODY (switch_expr), &switch_body_seq);

      gimplify_ctxp->in_switch_expr = old_in_switch_expr;
      maybe_warn_switch_unreachable (switch_body_seq);
      maybe_warn_implicit_fallthrough (switch_body_seq);
      /* Only do this for the outermost GIMPLE_SWITCH.  */
      if (!gimplify_ctxp->in_switch_expr)
	expand_FALLTHROUGH (&switch_body_seq);

      labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = saved_labels;

      if (gimplify_ctxp->live_switch_vars)
	{
	  /* All live switch variables must have been cleaned up by now.  */
	  gcc_assert (gimplify_ctxp->live_switch_vars->elements () == 0);
	  delete gimplify_ctxp->live_switch_vars;
	}
      gimplify_ctxp->live_switch_vars = saved_live_switch_vars;

      preprocess_case_label_vec_for_gimple (labels, index_type,
					    &default_case);

      /* A GIMPLE_SWITCH always has a default; synthesize an empty one
	 at the end of the body if the source had none.  */
      if (!default_case)
	{
	  glabel *new_default;

	  default_case
	    = build_case_label (NULL_TREE, NULL_TREE,
				create_artificial_label (UNKNOWN_LOCATION));
	  new_default = gimple_build_label (CASE_LABEL (default_case));
	  gimplify_seq_add_stmt (&switch_body_seq, new_default);
	}

      switch_stmt = gimple_build_switch (SWITCH_COND (switch_expr),
					 default_case, labels);
      gimplify_seq_add_stmt (pre_p, switch_stmt);
      gimplify_seq_add_seq (pre_p, switch_body_seq);
      labels.release ();
    }
  else
    gcc_assert (SWITCH_LABELS (switch_expr));

  return GS_ALL_DONE;
}
/* Gimplify the LABEL_EXPR pointed to by EXPR_P.  */

static enum gimplify_status
gimplify_label_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree label = LABEL_EXPR_LABEL (*expr_p);

  /* A label must belong to the function currently being gimplified.  */
  gcc_assert (decl_function_context (label) == current_function_decl);

  glabel *stmt = gimple_build_label (label);
  gimple_set_location (stmt, EXPR_LOCATION (*expr_p));
  gimplify_seq_add_stmt (pre_p, stmt);

  return GS_ALL_DONE;
}
/* Gimplify the CASE_LABEL_EXPR pointed to by EXPR_P.  */

static enum gimplify_status
gimplify_case_label_expr (tree *expr_p, gimple_seq *pre_p)
{
  /* Invalid programs can play Duff's Device type games with, for example,
     #pragma omp parallel.  At least in the C front end, we don't
     detect such invalid branches until after gimplification, in the
     diagnose_omp_blocks pass.  Hence, look for the innermost context
     that actually has a case-label vector.  */
  struct gimplify_ctx *ctxp = gimplify_ctxp;
  while (!ctxp->case_labels.exists ())
    ctxp = ctxp->prev_context;

  glabel *stmt = gimple_build_label (CASE_LABEL (*expr_p));
  gimple_set_location (stmt, EXPR_LOCATION (*expr_p));
  ctxp->case_labels.safe_push (*expr_p);
  gimplify_seq_add_stmt (pre_p, stmt);

  return GS_ALL_DONE;
}
/* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first
   if necessary.  */

tree
build_and_jump (tree *label_p)
{
  /* If there's nowhere to jump, just fall through.  */
  if (label_p == NULL)
    return NULL_TREE;

  /* Lazily create the destination label on first use.  */
  if (*label_p == NULL_TREE)
    *label_p = create_artificial_label (UNKNOWN_LOCATION);

  return build1 (GOTO_EXPR, void_type_node, *label_p);
}
/* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR.
   This also involves building a label to jump to and communicating it to
   gimplify_loop_expr through gimplify_ctxp->exit_label.  */

static enum gimplify_status
gimplify_exit_expr (tree *expr_p)
{
  tree cond = TREE_OPERAND (*expr_p, 0);

  /* Build "if (cond) goto exit_label;", creating the shared exit label
     in the gimplify context on demand.  */
  tree jump = build_and_jump (&gimplify_ctxp->exit_label);
  *expr_p = build3 (COND_EXPR, void_type_node, cond, jump, NULL_TREE);

  return GS_OK;
}
/* *EXPR_P is a COMPONENT_REF being used as an rvalue.  If its type is
   different from its canonical type, wrap the whole thing inside a
   NOP_EXPR and force the type of the COMPONENT_REF to be the canonical
   type.

   The canonical type of a COMPONENT_REF is the type of the field being
   referenced--unless the field is a bit-field which can be read directly
   in a smaller mode, in which case the canonical type is the
   sign-appropriate type corresponding to that mode.  */

static void
canonicalize_component_ref (tree *expr_p)
{
  tree expr = *expr_p;
  tree type;

  gcc_assert (TREE_CODE (expr) == COMPONENT_REF);

  /* For integral types, get_unwidened computes the narrowest type the
     access can legitimately use (the bit-field case above).  */
  if (INTEGRAL_TYPE_P (TREE_TYPE (expr)))
    type = TREE_TYPE (get_unwidened (expr, NULL_TREE));
  else
    type = TREE_TYPE (TREE_OPERAND (expr, 1));

  /* One could argue that all the stuff below is not necessary for
     the non-bitfield case and declare it a FE error if type
     adjustment would be needed.  */
  if (TREE_TYPE (expr) != type)
    {
#ifdef ENABLE_TYPES_CHECKING
      tree old_type = TREE_TYPE (expr);
#endif
      int type_quals;

      /* We need to preserve qualifiers and propagate them from
	 operand 0.  */
      type_quals = TYPE_QUALS (type)
	| TYPE_QUALS (TREE_TYPE (TREE_OPERAND (expr, 0)));
      if (TYPE_QUALS (type) != type_quals)
	type = build_qualified_type (TYPE_MAIN_VARIANT (type), type_quals);

      /* Set the type of the COMPONENT_REF to the underlying type.  */
      TREE_TYPE (expr) = type;

#ifdef ENABLE_TYPES_CHECKING
      /* It is now a FE error, if the conversion from the canonical
	 type to the original expression type is not useless.  */
      gcc_assert (useless_type_conversion_p (old_type, type));
#endif
    }
}
/* If a NOP conversion is changing a pointer to array of foo to a pointer
   to foo, embed that change in the ADDR_EXPR by converting
      T array[U];
      (T *)&array
   ==>
      &array[L]
   where L is the lower bound.  For simplicity, only do this for constant
   lower bound.
   The constraint is that the type of &array[L] is trivially convertible
   to T *.  */

static void
canonicalize_addr_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree addr_expr = TREE_OPERAND (expr, 0);
  tree datype, ddatype, pddatype;

  /* We simplify only conversions from an ADDR_EXPR to a pointer type.  */
  if (!POINTER_TYPE_P (TREE_TYPE (expr))
      || TREE_CODE (addr_expr) != ADDR_EXPR)
    return;

  /* The addr_expr type should be a pointer to an array.  */
  datype = TREE_TYPE (TREE_TYPE (addr_expr));
  if (TREE_CODE (datype) != ARRAY_TYPE)
    return;

  /* The pointer to element type shall be trivially convertible to
     the expression pointer type.  */
  ddatype = TREE_TYPE (datype);
  pddatype = build_pointer_type (ddatype);
  if (!useless_type_conversion_p (TYPE_MAIN_VARIANT (TREE_TYPE (expr)),
				  pddatype))
    return;

  /* The lower bound and element sizes must be constant.  */
  if (!TYPE_SIZE_UNIT (ddatype)
      || TREE_CODE (TYPE_SIZE_UNIT (ddatype)) != INTEGER_CST
      || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype))
      || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST)
    return;

  /* All checks succeeded.  Build a new node to merge the cast.  */
  *expr_p = build4 (ARRAY_REF, ddatype, TREE_OPERAND (addr_expr, 0),
		    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
		    NULL_TREE, NULL_TREE);
  *expr_p = build1 (ADDR_EXPR, pddatype, *expr_p);

  /* We can have stripped a required restrict qualifier above.  */
  if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p)))
    *expr_p = fold_convert (TREE_TYPE (expr), *expr_p);
}
/* *EXPR_P is a NOP_EXPR or CONVERT_EXPR.  Remove it and/or other conversions
   underneath as appropriate.  Always returns GS_OK so the expression is
   re-examined by the caller.  */

static enum gimplify_status
gimplify_conversion (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  gcc_assert (CONVERT_EXPR_P (*expr_p));

  /* Then strip away all but the outermost conversion.  */
  STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0));

  /* And remove the outermost conversion if it's useless.  */
  if (tree_ssa_useless_type_conversion (*expr_p))
    *expr_p = TREE_OPERAND (*expr_p, 0);

  /* If we still have a conversion at the toplevel,
     then canonicalize some constructs.  */
  if (CONVERT_EXPR_P (*expr_p))
    {
      tree sub = TREE_OPERAND (*expr_p, 0);

      /* If a NOP conversion is changing the type of a COMPONENT_REF
	 expression, then canonicalize its type now in order to expose more
	 redundant conversions.  */
      if (TREE_CODE (sub) == COMPONENT_REF)
	canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0));

      /* If a NOP conversion is changing a pointer to array of foo
	 to a pointer to foo, embed that change in the ADDR_EXPR.  */
      else if (TREE_CODE (sub) == ADDR_EXPR)
	canonicalize_addr_expr (expr_p);
    }

  /* If we have a conversion to a non-register type force the
     use of a VIEW_CONVERT_EXPR instead.  */
  if (CONVERT_EXPR_P (*expr_p) && !is_gimple_reg_type (TREE_TYPE (*expr_p)))
    *expr_p = fold_build1_loc (loc, VIEW_CONVERT_EXPR, TREE_TYPE (*expr_p),
			       TREE_OPERAND (*expr_p, 0));

  /* Canonicalize CONVERT_EXPR to NOP_EXPR.  */
  if (TREE_CODE (*expr_p) == CONVERT_EXPR)
    TREE_SET_CODE (*expr_p, NOP_EXPR);

  return GS_OK;
}
/* Nonlocal VLAs seen in the current function.  */
static hash_set<tree> *nonlocal_vlas;

/* The VAR_DECLs created for nonlocal VLAs for debug info purposes;
   a list chained through DECL_CHAIN.  */
static tree nonlocal_vla_vars;
/* Gimplify a VAR_DECL or PARM_DECL.  Return GS_OK if we expanded a
   DECL_VALUE_EXPR, and it's worth re-examining things.  */

static enum gimplify_status
gimplify_var_or_parm_decl (tree *expr_p)
{
  tree decl = *expr_p;

  /* ??? If this is a local variable, and it has not been seen in any
     outer BIND_EXPR, then it's probably the result of a duplicate
     declaration, for which we've already issued an error.  It would
     be really nice if the front end wouldn't leak these at all.
     Currently the only known culprit is C++ destructors, as seen
     in g++.old-deja/g++.jason/binding.C.  */
  if (VAR_P (decl)
      && !DECL_SEEN_IN_BIND_EXPR_P (decl)
      && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl)
      && decl_function_context (decl) == current_function_decl)
    {
      gcc_assert (seen_error ());
      return GS_ERROR;
    }

  /* When within an OMP context, notice uses of variables.  */
  if (gimplify_omp_ctxp && omp_notice_variable (gimplify_omp_ctxp, decl, true))
    return GS_ALL_DONE;

  /* If the decl is an alias for another expression, substitute it now.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree value_expr = DECL_VALUE_EXPR (decl);

      /* For referenced nonlocal VLAs add a decl for debugging purposes
	 to the current function.  */
      if (VAR_P (decl)
	  && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST
	  && nonlocal_vlas != NULL
	  && TREE_CODE (value_expr) == INDIRECT_REF
	  && TREE_CODE (TREE_OPERAND (value_expr, 0)) == VAR_DECL
	  && decl_function_context (decl) != current_function_decl)
	{
	  /* Skip enclosing worksharing/SIMD/accelerator OMP regions;
	     only add the debug decl when not inside any of them.  */
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
	  while (ctx
		 && (ctx->region_type == ORT_WORKSHARE
		     || ctx->region_type == ORT_SIMD
		     || ctx->region_type == ORT_ACC))
	    ctx = ctx->outer_context;
	  /* hash_set::add returns true if DECL was already present, so
	     the copy is made only on the first reference.  */
	  if (!ctx && !nonlocal_vlas->add (decl))
	    {
	      tree copy = copy_node (decl);

	      lang_hooks.dup_lang_specific_decl (copy);
	      SET_DECL_RTL (copy, 0);
	      TREE_USED (copy) = 1;
	      DECL_CHAIN (copy) = nonlocal_vla_vars;
	      nonlocal_vla_vars = copy;
	      SET_DECL_VALUE_EXPR (copy, unshare_expr (value_expr));
	      DECL_HAS_VALUE_EXPR_P (copy) = 1;
	    }
	}

      *expr_p = unshare_expr (value_expr);
      return GS_OK;
    }

  return GS_ALL_DONE;
}
/* Recalculate the value of the TREE_SIDE_EFFECTS flag for T from its
   code and its operands' flags.  */

static void
recalculate_side_effects (tree t)
{
  enum tree_code code = TREE_CODE (t);
  int len = TREE_OPERAND_LENGTH (t);
  int i;

  switch (TREE_CODE_CLASS (code))
    {
    case tcc_expression:
      switch (code)
	{
	case INIT_EXPR:
	case MODIFY_EXPR:
	case VA_ARG_EXPR:
	case PREDECREMENT_EXPR:
	case PREINCREMENT_EXPR:
	case POSTDECREMENT_EXPR:
	case POSTINCREMENT_EXPR:
	  /* All of these have side-effects, no matter what their
	     operands are.  */
	  return;

	default:
	  break;
	}
      /* Fall through.  */

    case tcc_comparison:  /* a comparison expression */
    case tcc_unary:       /* a unary arithmetic expression */
    case tcc_binary:      /* a binary arithmetic expression */
    case tcc_reference:   /* a reference */
    case tcc_vl_exp:        /* a function call */
      /* Start from the node's own volatility, then OR in each operand's
	 side-effects flag.  */
      TREE_SIDE_EFFECTS (t) = TREE_THIS_VOLATILE (t);
      for (i = 0; i < len; ++i)
	{
	  tree op = TREE_OPERAND (t, i);
	  if (op && TREE_SIDE_EFFECTS (op))
	    TREE_SIDE_EFFECTS (t) = 1;
	}
      break;

    case tcc_constant:
      /* No side-effects.  */
      return;

    default:
      gcc_unreachable ();
   }
}
/* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR
   node *EXPR_P.

      compound_lval
	      : min_lval '[' val ']'
	      | min_lval '.' ID
	      | compound_lval '[' val ']'
	      | compound_lval '.' ID

   This is not part of the original SIMPLE definition, which separates
   array and member references, but it seems reasonable to handle them
   together.  Also, this way we don't run into problems with union
   aliasing; gcc requires that for accesses through a union to alias, the
   union reference must be explicit, which was not always the case when we
   were splitting up array and member refs.

   PRE_P points to the sequence where side effects that must happen before
     *EXPR_P should be stored.

   POST_P points to the sequence where side effects that must happen after
     *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_compound_lval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			fallback_t fallback)
{
  tree *p;
  enum gimplify_status ret = GS_ALL_DONE, tret;
  int i;
  location_t loc = EXPR_LOCATION (*expr_p);
  tree expr = *expr_p;

  /* Create a stack of the subexpressions so later we can walk them in
     order from inner to outer.  */
  auto_vec<tree, 10> expr_stack;

  /* We can handle anything that get_inner_reference can deal with.  */
  for (p = expr_p; ; p = &TREE_OPERAND (*p, 0))
    {
    restart:
      /* Fold INDIRECT_REFs now to turn them into ARRAY_REFs.  */
      if (TREE_CODE (*p) == INDIRECT_REF)
	*p = fold_indirect_ref_loc (loc, *p);

      if (handled_component_p (*p))
	;
      /* Expand DECL_VALUE_EXPR now.  In some cases that may expose
	 additional COMPONENT_REFs.  */
      else if ((VAR_P (*p) || TREE_CODE (*p) == PARM_DECL)
	       && gimplify_var_or_parm_decl (p) == GS_OK)
	goto restart;
      else
	break;

      expr_stack.safe_push (*p);
    }

  gcc_assert (expr_stack.length ());

  /* Now EXPR_STACK is a stack of pointers to all the refs we've
     walked through and P points to the innermost expression.

     Java requires that we elaborated nodes in source order.  That
     means we must gimplify the inner expression followed by each of
     the indices, in order.  But we can't gimplify the inner
     expression until we deal with any variable bounds, sizes, or
     positions in order to deal with PLACEHOLDER_EXPRs.

     So we do this in three steps.  First we deal with the annotations
     for any variables in the components, then we gimplify the base,
     then we gimplify any indices, from left to right.  */
  for (i = expr_stack.length () - 1; i >= 0; i--)
    {
      tree t = expr_stack[i];

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the low bound and element type size and put them into
	     the ARRAY_REF.  If these values are set, they have already been
	     gimplified.  */
	  if (TREE_OPERAND (t, 2) == NULL_TREE)
	    {
	      tree low = unshare_expr (array_ref_low_bound (t));
	      if (!is_gimple_min_invariant (low))
		{
		  TREE_OPERAND (t, 2) = low;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p,
					post_p, is_gimple_reg,
					fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	  else
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				    is_gimple_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }

	  if (TREE_OPERAND (t, 3) == NULL_TREE)
	    {
	      tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));
	      tree elmt_size = unshare_expr (array_ref_element_size (t));
	      tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type));

	      /* Divide the element size by the alignment of the element
		 type (above).  */
	      elmt_size
		= size_binop_loc (loc, EXACT_DIV_EXPR, elmt_size, factor);

	      if (!is_gimple_min_invariant (elmt_size))
		{
		  TREE_OPERAND (t, 3) = elmt_size;
		  tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p,
					post_p, is_gimple_reg,
					fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	  else
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p,
				    is_gimple_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
      else if (TREE_CODE (t) == COMPONENT_REF)
	{
	  /* Set the field offset into T and gimplify it.  */
	  if (TREE_OPERAND (t, 2) == NULL_TREE)
	    {
	      tree offset = unshare_expr (component_ref_field_offset (t));
	      tree field = TREE_OPERAND (t, 1);
	      tree factor
		= size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT);

	      /* Divide the offset by its alignment.  */
	      offset = size_binop_loc (loc, EXACT_DIV_EXPR, offset, factor);

	      if (!is_gimple_min_invariant (offset))
		{
		  TREE_OPERAND (t, 2) = offset;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p,
					post_p, is_gimple_reg,
					fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	  else
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				    is_gimple_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
    }

  /* Step 2 is to gimplify the base expression.  Make sure lvalue is set
     so as to match the min_lval predicate.  Failure to do so may result
     in the creation of large aggregate temporaries.  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval,
			fallback | fb_lvalue);
  ret = MIN (ret, tret);

  /* And finally, the indices and operands of ARRAY_REF.  During this
     loop we also remove any useless conversions.  */
  for (; expr_stack.length () > 0; )
    {
      tree t = expr_stack.pop ();

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the dimension.  */
	  if (!is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				    is_gimple_val, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}

      STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0));

      /* The innermost expression P may have originally had
	 TREE_SIDE_EFFECTS set which would have caused all the outer
	 expressions in *EXPR_P leading to P to also have had
	 TREE_SIDE_EFFECTS set.  */
      recalculate_side_effects (t);
    }

  /* If the outermost expression is a COMPONENT_REF, canonicalize its type.  */
  if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF)
    {
      canonicalize_component_ref (expr_p);
    }

  expr_stack.release ();

  gcc_assert (*expr_p == expr || ret != GS_ALL_DONE);

  return ret;
}
/* Gimplify the self modifying expression pointed to by EXPR_P
   (++, --, +=, -=).

   PRE_P points to the list where side effects that must happen before
       *EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
       *EXPR_P should be stored.

   WANT_VALUE is nonzero iff we want to use the value of this expression
       in another expression.

   ARITH_TYPE is the type the computation should be performed in.  */

enum gimplify_status
gimplify_self_mod_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			bool want_value, tree arith_type)
{
  enum tree_code code;
  tree lhs, lvalue, rhs, t1;
  gimple_seq post = NULL, *orig_post_p = post_p;
  bool postfix;
  enum tree_code arith_code;
  enum gimplify_status ret;
  location_t loc = EXPR_LOCATION (*expr_p);

  code = TREE_CODE (*expr_p);

  gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR
	      || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR);

  /* Prefix or postfix?  */
  if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
    /* Faster to treat as prefix if result is not used.  */
    postfix = want_value;
  else
    postfix = false;

  /* For postfix, make sure the inner expression's post side effects
     are executed after side effects from this expression.  */
  if (postfix)
    post_p = &post;

  /* Add or subtract?  */
  if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
    arith_code = PLUS_EXPR;
  else
    arith_code = MINUS_EXPR;

  /* Gimplify the LHS into a GIMPLE lvalue.  */
  lvalue = TREE_OPERAND (*expr_p, 0);
  ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Extract the operands to the arithmetic operation.  */
  lhs = lvalue;
  rhs = TREE_OPERAND (*expr_p, 1);

  /* For postfix operator, we evaluate the LHS to an rvalue and then use
     that as the result value and in the postqueue operation.  */
  if (postfix)
    {
      ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue);
      if (ret == GS_ERROR)
	return ret;

      /* Capture the old value in a temporary; it is the expression's
	 result for postfix increment/decrement.  */
      lhs = get_initialized_tmp_var (lhs, pre_p, NULL);
    }

  /* For POINTERs increment, use POINTER_PLUS_EXPR.  */
  if (POINTER_TYPE_P (TREE_TYPE (lhs)))
    {
      rhs = convert_to_ptrofftype_loc (loc, rhs);
      /* POINTER_PLUS_EXPR has no MINUS counterpart, so negate the
	 offset instead.  */
      if (arith_code == MINUS_EXPR)
	rhs = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (rhs), rhs);
      t1 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (*expr_p), lhs, rhs);
    }
  else
    t1 = fold_convert (TREE_TYPE (*expr_p),
		       fold_build2 (arith_code, arith_type,
				    fold_convert (arith_type, lhs),
				    fold_convert (arith_type, rhs)));

  if (postfix)
    {
      /* Emit the assignment now and flush the deferred post queue; the
	 saved old value (LHS) replaces the expression.  */
      gimplify_assign (lvalue, t1, pre_p);
      gimplify_seq_add_seq (orig_post_p, post);
      *expr_p = lhs;
      return GS_ALL_DONE;
    }
  else
    {
      *expr_p = build2 (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1);
      return GS_OK;
    }
}
/* If *EXPR_P has a variable sized type, wrap it in a WITH_SIZE_EXPR.
   The wrapper records the runtime size expression so that later
   consumers (e.g. call and assignment gimplification) know how many
   bytes the object occupies.  */
static void
maybe_with_size_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  tree size;
  /* If we've already wrapped this or the type is error_mark_node, we can't do
     anything.  */
  if (TREE_CODE (expr) == WITH_SIZE_EXPR
      || type == error_mark_node)
    return;
  /* If the size isn't known or is a constant, we have nothing to do.  */
  size = TYPE_SIZE_UNIT (type);
  if (!size || TREE_CODE (size) == INTEGER_CST)
    return;
  /* Otherwise, make a WITH_SIZE_EXPR.  The size expression may contain a
     PLACEHOLDER_EXPR referring to the object itself, so substitute EXPR
     into an unshared copy before using it.  */
  size = unshare_expr (size);
  size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, expr);
  *expr_p = build2 (WITH_SIZE_EXPR, type, expr, size);
}
/* Helper for gimplify_call_expr.  Gimplify a single argument *ARG_P
   Store any side-effects in PRE_P.  CALL_LOCATION is the location of
   the CALL_EXPR.  If ALLOW_SSA is set the actual parameter may be
   gimplified to an SSA name.  Returns the gimplify_status of the
   argument's gimplification.  */
enum gimplify_status
gimplify_arg (tree *arg_p, gimple_seq *pre_p, location_t call_location,
	      bool allow_ssa)
{
  bool (*test) (tree);
  fallback_t fb;
  /* In general, we allow lvalues for function arguments to avoid
     extra overhead of copying large aggregates out of even larger
     aggregates into temporaries only to copy the temporaries to
     the argument list.  Make optimizers happy by pulling out to
     temporaries those types that fit in registers.  */
  if (is_gimple_reg_type (TREE_TYPE (*arg_p)))
    test = is_gimple_val, fb = fb_rvalue;
  else
    {
      test = is_gimple_lvalue, fb = fb_either;
      /* Also strip a TARGET_EXPR that would force an extra copy.  */
      if (TREE_CODE (*arg_p) == TARGET_EXPR)
	{
	  tree init = TARGET_EXPR_INITIAL (*arg_p);
	  if (init
	      && !VOID_TYPE_P (TREE_TYPE (init)))
	    *arg_p = init;
	}
    }
  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (arg_p);
  /* FIXME diagnostics: This will mess up gcc.dg/Warray-bounds.c.  */
  /* Make sure arguments have the same location as the function call
     itself.  */
  protected_set_expr_location (*arg_p, call_location);
  /* There is a sequence point before a function call.  Side effects in
     the argument list must occur before the actual call.  So, when
     gimplifying arguments, force gimplify_expr to use an internal
     post queue which is then appended to the end of PRE_P.  */
  return gimplify_expr (arg_p, pre_p, NULL, test, fb, allow_ssa);
}
/* Don't fold inside offloading or taskreg regions: it can break code by
   adding decl references that weren't in the source.  We'll do it during
   omplower pass instead.  Returns the result of fold_stmt, or false if
   folding was suppressed.  */
static bool
maybe_fold_stmt (gimple_stmt_iterator *gsi)
{
  /* Walk outward through the enclosing OMP contexts; any offload,
     parallel or task region in the chain suppresses folding here.  */
  struct gimplify_omp_ctx *c = gimplify_omp_ctxp;
  while (c != NULL)
    {
      if (c->region_type & (ORT_TARGET | ORT_PARALLEL | ORT_TASK))
	return false;
      c = c->outer_context;
    }
  return fold_stmt (gsi);
}
/* Gimplify the CALL_EXPR node *EXPR_P into the GIMPLE sequence PRE_P.
   WANT_VALUE is true if the result of the call is desired.  On success
   *EXPR_P is either cleared (statement call emitted to PRE_P) or left
   as a gimplified CALL_EXPR for gimplify_modify_expr to consume.  */
static enum gimplify_status
gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
  tree fndecl, parms, p, fnptrtype;
  enum gimplify_status ret;
  int i, nargs;
  gcall *call;
  bool builtin_va_start_p = false;
  location_t loc = EXPR_LOCATION (*expr_p);
  gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR);
  /* For reliable diagnostics during inlining, it is necessary that
     every call_expr be annotated with file and line.  */
  if (! EXPR_HAS_LOCATION (*expr_p))
    SET_EXPR_LOCATION (*expr_p, input_location);
  /* Gimplify internal functions created in the FEs.  These have no
     function decl/address, only an internal_fn code.  */
  if (CALL_EXPR_FN (*expr_p) == NULL_TREE)
    {
      if (want_value)
	return GS_ALL_DONE;
      nargs = call_expr_nargs (*expr_p);
      enum internal_fn ifn = CALL_EXPR_IFN (*expr_p);
      auto_vec<tree> vargs (nargs);
      for (i = 0; i < nargs; i++)
	{
	  gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p,
			EXPR_LOCATION (*expr_p));
	  vargs.quick_push (CALL_EXPR_ARG (*expr_p, i));
	}
      gimple *call = gimple_build_call_internal_vec (ifn, vargs);
      gimplify_seq_add_stmt (pre_p, call);
      return GS_ALL_DONE;
    }
  /* This may be a call to a builtin function.
     Builtin function calls may be transformed into different
     (and more efficient) builtin function calls under certain
     circumstances.  Unfortunately, gimplification can muck things
     up enough that the builtin expanders are not aware that certain
     transformations are still valid.
     So we attempt transformation/gimplification of the call before
     we gimplify the CALL_EXPR.  At this time we do not manage to
     transform all calls in the same manner as the expanders do, but
     we do transform most of them.  */
  fndecl = get_callee_fndecl (*expr_p);
  if (fndecl
      && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_ALLOCA:
      case BUILT_IN_ALLOCA_WITH_ALIGN:
	/* If the call has been built for a variable-sized object, then we
	   want to restore the stack level when the enclosing BIND_EXPR is
	   exited to reclaim the allocated space; otherwise, we precisely
	   need to do the opposite and preserve the latest stack level.  */
	if (CALL_ALLOCA_FOR_VAR_P (*expr_p))
	  gimplify_ctxp->save_stack = true;
	else
	  gimplify_ctxp->keep_stack = true;
	break;
      case BUILT_IN_VA_START:
	{
	  builtin_va_start_p = TRUE;
	  if (call_expr_nargs (*expr_p) < 2)
	    {
	      error ("too few arguments to function %<va_start%>");
	      *expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
	      return GS_OK;
	    }
	  if (fold_builtin_next_arg (*expr_p, true))
	    {
	      *expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
	      return GS_OK;
	    }
	  break;
	}
      default:
	;
      }
  if (fndecl && DECL_BUILT_IN (fndecl))
    {
      tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);
      if (new_tree && new_tree != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new_tree;
	  return GS_OK;
	}
    }
  /* Remember the original function pointer type.  */
  fnptrtype = TREE_TYPE (CALL_EXPR_FN (*expr_p));
  /* There is a sequence point before the call, so any side effects in
     the calling expression must occur before the actual call.  Force
     gimplify_expr to use an internal post queue.  */
  ret = gimplify_expr (&CALL_EXPR_FN (*expr_p), pre_p, NULL,
		       is_gimple_call_addr, fb_rvalue);
  nargs = call_expr_nargs (*expr_p);
  /* Get argument types for verification.  */
  fndecl = get_callee_fndecl (*expr_p);
  parms = NULL_TREE;
  if (fndecl)
    parms = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
  else
    parms = TYPE_ARG_TYPES (TREE_TYPE (fnptrtype));
  if (fndecl && DECL_ARGUMENTS (fndecl))
    p = DECL_ARGUMENTS (fndecl);
  else if (parms)
    p = parms;
  else
    p = NULL_TREE;
  /* Advance P past the named parameters; afterwards P is null iff the
     call supplies no more arguments than there are named parameters.  */
  for (i = 0; i < nargs && p; i++, p = TREE_CHAIN (p))
    ;
  /* If the last argument is __builtin_va_arg_pack () and it is not
     passed as a named argument, decrease the number of CALL_EXPR
     arguments and set instead the CALL_EXPR_VA_ARG_PACK flag.  */
  if (!p
      && i < nargs
      && TREE_CODE (CALL_EXPR_ARG (*expr_p, nargs - 1)) == CALL_EXPR)
    {
      tree last_arg = CALL_EXPR_ARG (*expr_p, nargs - 1);
      tree last_arg_fndecl = get_callee_fndecl (last_arg);
      if (last_arg_fndecl
	  && TREE_CODE (last_arg_fndecl) == FUNCTION_DECL
	  && DECL_BUILT_IN_CLASS (last_arg_fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (last_arg_fndecl) == BUILT_IN_VA_ARG_PACK)
	{
	  tree call = *expr_p;
	  --nargs;
	  *expr_p = build_call_array_loc (loc, TREE_TYPE (call),
					  CALL_EXPR_FN (call),
					  nargs, CALL_EXPR_ARGP (call));
	  /* Copy all CALL_EXPR flags, location and block, except
	     CALL_EXPR_VA_ARG_PACK flag.  */
	  CALL_EXPR_STATIC_CHAIN (*expr_p) = CALL_EXPR_STATIC_CHAIN (call);
	  CALL_EXPR_TAILCALL (*expr_p) = CALL_EXPR_TAILCALL (call);
	  CALL_EXPR_RETURN_SLOT_OPT (*expr_p)
	    = CALL_EXPR_RETURN_SLOT_OPT (call);
	  CALL_FROM_THUNK_P (*expr_p) = CALL_FROM_THUNK_P (call);
	  SET_EXPR_LOCATION (*expr_p, EXPR_LOCATION (call));
	  /* Set CALL_EXPR_VA_ARG_PACK.  */
	  CALL_EXPR_VA_ARG_PACK (*expr_p) = 1;
	}
    }
  /* If the call returns twice then after building the CFG the call
     argument computations will no longer dominate the call because
     we add an abnormal incoming edge to the call.  So do not use SSA
     vars there.  */
  bool returns_twice = call_expr_flags (*expr_p) & ECF_RETURNS_TWICE;
  /* Gimplify the function arguments.  */
  if (nargs > 0)
    {
      for (i = (PUSH_ARGS_REVERSED ? nargs - 1 : 0);
	   PUSH_ARGS_REVERSED ? i >= 0 : i < nargs;
	   PUSH_ARGS_REVERSED ? i-- : i++)
	{
	  enum gimplify_status t;
	  /* Avoid gimplifying the second argument to va_start, which needs to
	     be the plain PARM_DECL.  */
	  if ((i != 1) || !builtin_va_start_p)
	    {
	      t = gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p,
				EXPR_LOCATION (*expr_p), ! returns_twice);
	      if (t == GS_ERROR)
		ret = GS_ERROR;
	    }
	}
    }
  /* Gimplify the static chain.  */
  if (CALL_EXPR_STATIC_CHAIN (*expr_p))
    {
      if (fndecl && !DECL_STATIC_CHAIN (fndecl))
	CALL_EXPR_STATIC_CHAIN (*expr_p) = NULL;
      else
	{
	  enum gimplify_status t;
	  t = gimplify_arg (&CALL_EXPR_STATIC_CHAIN (*expr_p), pre_p,
			    EXPR_LOCATION (*expr_p), ! returns_twice);
	  if (t == GS_ERROR)
	    ret = GS_ERROR;
	}
    }
  /* Verify the function result.  */
  if (want_value && fndecl
      && VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fnptrtype))))
    {
      error_at (loc, "using result of function returning %<void%>");
      ret = GS_ERROR;
    }
  /* Try this again in case gimplification exposed something.  */
  if (ret != GS_ERROR)
    {
      tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);
      if (new_tree && new_tree != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new_tree;
	  return GS_OK;
	}
    }
  else
    {
      *expr_p = error_mark_node;
      return GS_ERROR;
    }
  /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its
     decl.  This allows us to eliminate redundant or useless
     calls to "const" functions.  */
  if (TREE_CODE (*expr_p) == CALL_EXPR)
    {
      int flags = call_expr_flags (*expr_p);
      if (flags & (ECF_CONST | ECF_PURE)
	  /* An infinite loop is considered a side effect.  */
	  && !(flags & (ECF_LOOPING_CONST_OR_PURE)))
	TREE_SIDE_EFFECTS (*expr_p) = 0;
    }
  /* If the value is not needed by the caller, emit a new GIMPLE_CALL
     and clear *EXPR_P.  Otherwise, leave *EXPR_P in its gimplified
     form and delegate the creation of a GIMPLE_CALL to
     gimplify_modify_expr.  This is always possible because when
     WANT_VALUE is true, the caller wants the result of this call into
     a temporary, which means that we will emit an INIT_EXPR in
     internal_get_tmp_var which will then be handled by
     gimplify_modify_expr.  */
  if (!want_value)
    {
      /* The CALL_EXPR in *EXPR_P is already in GIMPLE form, so all we
	 have to do is replicate it as a GIMPLE_CALL tuple.  */
      gimple_stmt_iterator gsi;
      call = gimple_build_call_from_tree (*expr_p);
      gimple_call_set_fntype (call, TREE_TYPE (fnptrtype));
      notice_special_calls (call);
      gimplify_seq_add_stmt (pre_p, call);
      gsi = gsi_last (*pre_p);
      maybe_fold_stmt (&gsi);
      *expr_p = NULL_TREE;
    }
  else
    /* Remember the original function type.  */
    CALL_EXPR_FN (*expr_p) = build1 (NOP_EXPR, fnptrtype,
				     CALL_EXPR_FN (*expr_p));
  return ret;
}
/* Handle shortcut semantics in the predicate operand of a COND_EXPR by
   rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs.
   TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the
   condition is true or false, respectively.  If null, we should generate
   our own to skip over the evaluation of this specific expression.
   LOCUS is the source location of the COND_EXPR.
   This function is the tree equivalent of do_jump.
   shortcut_cond_r should only be called by shortcut_cond_expr.  */
static tree
shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p,
		 location_t locus)
{
  tree local_label = NULL_TREE;
  tree t, expr = NULL;
  /* OK, it's not a simple case; we need to pull apart the COND_EXPR to
     retain the shortcut semantics.  Just insert the gotos here;
     shortcut_cond_expr will append the real blocks later.  */
  if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
    {
      location_t new_locus;
      /* Turn if (a && b) into
	 if (a); else goto no;
	 if (b) goto yes; else goto no;
	 (no:) */
      if (false_label_p == NULL)
	false_label_p = &local_label;
      /* Keep the original source location on the first 'if'.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p, locus);
      append_to_statement_list (t, &expr);
      /* Set the source location of the && on the second 'if'.  */
      new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p,
			   new_locus);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
    {
      location_t new_locus;
      /* Turn if (a || b) into
	 if (a) goto yes;
	 if (b) goto yes; else goto no;
	 (yes:) */
      if (true_label_p == NULL)
	true_label_p = &local_label;
      /* Keep the original source location on the first 'if'.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL, locus);
      append_to_statement_list (t, &expr);
      /* Set the source location of the || on the second 'if'.  */
      new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p,
			   new_locus);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == COND_EXPR
	   && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 1)))
	   && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 2))))
    {
      location_t new_locus;
      /* As long as we're messing with gotos, turn if (a ? b : c) into
	 if (a)
	   if (b) goto yes; else goto no;
	 else
	   if (c) goto yes; else goto no;
	 Don't do this if one of the arms has void type, which can happen
	 in C++ when the arm is throw.  */
      /* Keep the original source location on the first 'if'.  Set the source
	 location of the ? on the second 'if'.  */
      new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
      expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0),
		     shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
				      false_label_p, locus),
		     shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p,
				      false_label_p, new_locus));
    }
  else
    {
      /* Base case: a simple predicate becomes a two-way conditional jump.
	 build_and_jump creates the label lazily when the pointer is null.  */
      expr = build3 (COND_EXPR, void_type_node, pred,
		     build_and_jump (true_label_p),
		     build_and_jump (false_label_p));
      SET_EXPR_LOCATION (expr, locus);
    }
  /* If a recursive case above needed a label of its own, emit it here so
     control falls through past this subexpression.  */
  if (local_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, local_label);
      append_to_statement_list (t, &expr);
    }
  return expr;
}
/* Given a conditional expression EXPR with short-circuit boolean
   predicates using TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR, break the
   predicate apart into the equivalent sequence of conditionals.
   Returns either the original EXPR (when no shortcut operators remain)
   or a new statement list implementing the same control flow with
   explicit gotos and labels.  */
static tree
shortcut_cond_expr (tree expr)
{
  tree pred = TREE_OPERAND (expr, 0);
  tree then_ = TREE_OPERAND (expr, 1);
  tree else_ = TREE_OPERAND (expr, 2);
  tree true_label, false_label, end_label, t;
  tree *true_label_p;
  tree *false_label_p;
  bool emit_end, emit_false, jump_over_else;
  bool then_se = then_ && TREE_SIDE_EFFECTS (then_);
  bool else_se = else_ && TREE_SIDE_EFFECTS (else_);
  /* First do simple transformations.  */
  if (!else_se)
    {
      /* If there is no 'else', turn
	   if (a && b) then c
	 into
	   if (a) if (b) then c.  */
      while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
	{
	  /* Keep the original source location on the first 'if'.  */
	  location_t locus = EXPR_LOC_OR_LOC (expr, input_location);
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  /* Set the source location of the && on the second 'if'.  */
	  if (EXPR_HAS_LOCATION (pred))
	    SET_EXPR_LOCATION (expr, EXPR_LOCATION (pred));
	  then_ = shortcut_cond_expr (expr);
	  then_se = then_ && TREE_SIDE_EFFECTS (then_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE);
	  SET_EXPR_LOCATION (expr, locus);
	}
    }
  if (!then_se)
    {
      /* If there is no 'then', turn
	   if (a || b); else d
	 into
	   if (a); else if (b); else d.  */
      while (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
	{
	  /* Keep the original source location on the first 'if'.  */
	  location_t locus = EXPR_LOC_OR_LOC (expr, input_location);
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  /* Set the source location of the || on the second 'if'.  */
	  if (EXPR_HAS_LOCATION (pred))
	    SET_EXPR_LOCATION (expr, EXPR_LOCATION (pred));
	  else_ = shortcut_cond_expr (expr);
	  else_se = else_ && TREE_SIDE_EFFECTS (else_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_);
	  SET_EXPR_LOCATION (expr, locus);
	}
    }
  /* If we're done, great.  */
  if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR
      && TREE_CODE (pred) != TRUTH_ORIF_EXPR)
    return expr;
  /* Otherwise we need to mess with gotos.  Change
       if (a) c; else d;
     to
       if (a); else goto no;
       c; goto end;
       no: d; end:
     and recursively gimplify the condition.  */
  true_label = false_label = end_label = NULL_TREE;
  /* If our arms just jump somewhere, hijack those labels so we don't
     generate jumps to jumps.  */
  if (then_
      && TREE_CODE (then_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL)
    {
      true_label = GOTO_DESTINATION (then_);
      then_ = NULL;
      then_se = false;
    }
  if (else_
      && TREE_CODE (else_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL)
    {
      false_label = GOTO_DESTINATION (else_);
      else_ = NULL;
      else_se = false;
    }
  /* If we aren't hijacking a label for the 'then' branch, it falls through.  */
  if (true_label)
    true_label_p = &true_label;
  else
    true_label_p = NULL;
  /* The 'else' branch also needs a label if it contains interesting code.  */
  if (false_label || else_se)
    false_label_p = &false_label;
  else
    false_label_p = NULL;
  /* If there was nothing else in our arms, just forward the label(s).  */
  if (!then_se && !else_se)
    return shortcut_cond_r (pred, true_label_p, false_label_p,
			    EXPR_LOC_OR_LOC (expr, input_location));
  /* If our last subexpression already has a terminal label, reuse it.  */
  if (else_se)
    t = expr_last (else_);
  else if (then_se)
    t = expr_last (then_);
  else
    t = NULL;
  if (t && TREE_CODE (t) == LABEL_EXPR)
    end_label = LABEL_EXPR_LABEL (t);
  /* If we don't care about jumping to the 'else' branch, jump to the end
     if the condition is false.  */
  if (!false_label_p)
    false_label_p = &end_label;
  /* We only want to emit these labels if we aren't hijacking them.  */
  emit_end = (end_label == NULL_TREE);
  emit_false = (false_label == NULL_TREE);
  /* We only emit the jump over the else clause if we have to--if the
     then clause may fall through.  Otherwise we can wind up with a
     useless jump and a useless label at the end of gimplified code,
     which will cause us to think that this conditional as a whole
     falls through even if it doesn't.  If we then inline a function
     which ends with such a condition, that can cause us to issue an
     inappropriate warning about control reaching the end of a
     non-void function.  */
  jump_over_else = block_may_fallthru (then_);
  pred = shortcut_cond_r (pred, true_label_p, false_label_p,
			  EXPR_LOC_OR_LOC (expr, input_location));
  /* Assemble the rewritten statement list: condition jumps, then-arm,
     optional jump over the else-arm, else label + arm, and end label.  */
  expr = NULL;
  append_to_statement_list (pred, &expr);
  append_to_statement_list (then_, &expr);
  if (else_se)
    {
      if (jump_over_else)
	{
	  tree last = expr_last (expr);
	  t = build_and_jump (&end_label);
	  if (EXPR_HAS_LOCATION (last))
	    SET_EXPR_LOCATION (t, EXPR_LOCATION (last));
	  append_to_statement_list (t, &expr);
	}
      if (emit_false)
	{
	  t = build1 (LABEL_EXPR, void_type_node, false_label);
	  append_to_statement_list (t, &expr);
	}
      append_to_statement_list (else_, &expr);
    }
  if (emit_end && end_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, end_label);
      append_to_statement_list (t, &expr);
    }
  return expr;
}
/* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE.
   Truth operators are boolified recursively; anything else is either
   retyped in place or wrapped in a conversion to boolean_type_node.  */
tree
gimple_boolify (tree expr)
{
  tree type = TREE_TYPE (expr);
  location_t loc = EXPR_LOCATION (expr);
  if (TREE_CODE (expr) == NE_EXPR
      && TREE_CODE (TREE_OPERAND (expr, 0)) == CALL_EXPR
      && integer_zerop (TREE_OPERAND (expr, 1)))
    {
      tree call = TREE_OPERAND (expr, 0);
      tree fn = get_callee_fndecl (call);
      /* For __builtin_expect ((long) (x), y) recurse into x as well
	 if x is truth_value_p.  */
      if (fn
	  && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fn) == BUILT_IN_EXPECT
	  && call_expr_nargs (call) == 2)
	{
	  tree arg = CALL_EXPR_ARG (call, 0);
	  if (arg)
	    {
	      if (TREE_CODE (arg) == NOP_EXPR
		  && TREE_TYPE (arg) == TREE_TYPE (call))
		arg = TREE_OPERAND (arg, 0);
	      if (truth_value_p (TREE_CODE (arg)))
		{
		  arg = gimple_boolify (arg);
		  CALL_EXPR_ARG (call, 0)
		    = fold_convert_loc (loc, TREE_TYPE (call), arg);
		}
	    }
	}
    }
  switch (TREE_CODE (expr))
    {
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_XOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Also boolify the arguments of truth exprs.  */
      TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1));
      /* FALLTHRU */
    case TRUTH_NOT_EXPR:
      TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
      /* These expressions always produce boolean results.  */
      if (TREE_CODE (type) != BOOLEAN_TYPE)
	TREE_TYPE (expr) = boolean_type_node;
      return expr;
    case ANNOTATE_EXPR:
      /* Loop annotations wrap their condition; boolify the wrapped
	 operand and retype the annotation itself.  */
      switch ((enum annot_expr_kind) TREE_INT_CST_LOW (TREE_OPERAND (expr, 1)))
	{
	case annot_expr_ivdep_kind:
	case annot_expr_no_vector_kind:
	case annot_expr_vector_kind:
	  TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
	  if (TREE_CODE (type) != BOOLEAN_TYPE)
	    TREE_TYPE (expr) = boolean_type_node;
	  return expr;
	default:
	  gcc_unreachable ();
	}
    default:
      if (COMPARISON_CLASS_P (expr))
	{
	  /* These expressions always produce boolean results.  */
	  if (TREE_CODE (type) != BOOLEAN_TYPE)
	    TREE_TYPE (expr) = boolean_type_node;
	  return expr;
	}
      /* Other expressions that get here must have boolean values, but
	 might need to be converted to the appropriate mode.  */
      if (TREE_CODE (type) == BOOLEAN_TYPE)
	return expr;
      return fold_convert_loc (loc, boolean_type_node, expr);
    }
}
/* Given a conditional expression *EXPR_P without side effects, gimplify
   its operands.  New statements are inserted to PRE_P.  Returns the
   combined (minimum) gimplify_status of the three operands.  */
static enum gimplify_status
gimplify_pure_cond_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p, cond;
  enum gimplify_status ret, tret;
  enum tree_code code;
  cond = gimple_boolify (COND_EXPR_COND (expr));
  /* We need to handle && and || specially, as their gimplification
     creates pure cond_expr, thus leading to an infinite cycle otherwise.
     Demoting them to their non-shortcut forms is safe here because the
     whole expression is known to be free of side effects.  */
  code = TREE_CODE (cond);
  if (code == TRUTH_ANDIF_EXPR)
    TREE_SET_CODE (cond, TRUTH_AND_EXPR);
  else if (code == TRUTH_ORIF_EXPR)
    TREE_SET_CODE (cond, TRUTH_OR_EXPR);
  ret = gimplify_expr (&cond, pre_p, NULL, is_gimple_condexpr, fb_rvalue);
  COND_EXPR_COND (*expr_p) = cond;
  tret = gimplify_expr (&COND_EXPR_THEN (expr), pre_p, NULL,
			is_gimple_val, fb_rvalue);
  ret = MIN (ret, tret);
  tret = gimplify_expr (&COND_EXPR_ELSE (expr), pre_p, NULL,
			is_gimple_val, fb_rvalue);
  return MIN (ret, tret);
}
/* Return true if evaluating EXPR could trap.
   EXPR is GENERIC, while tree_could_trap_p can be called
   only on GIMPLE.  */
static bool
generic_expr_could_trap_p (tree expr)
{
  /* A missing operand or an atomic GIMPLE value cannot trap.  */
  if (!expr || is_gimple_val (expr))
    return false;
  /* Conservatively treat non-expressions as trapping, and defer to
     tree_could_trap_p for the expression node itself.  */
  if (!EXPR_P (expr) || tree_could_trap_p (expr))
    return true;
  /* Otherwise EXPR could trap iff any of its operands could.  */
  unsigned num_ops = TREE_OPERAND_LENGTH (expr);
  for (unsigned ix = 0; ix < num_ops; ix++)
    if (generic_expr_could_trap_p (TREE_OPERAND (expr, ix)))
      return true;
  return false;
}
/* Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;'
   into
   if (p)			if (p)
     t1 = a;			  a;
   else		or		else
     t1 = b;			  b;
   t1;
   The second form is used when *EXPR_P is of type void.
   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.  */
static enum gimplify_status
gimplify_cond_expr (tree *expr_p, gimple_seq *pre_p, fallback_t fallback)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  location_t loc = EXPR_LOCATION (expr);
  tree tmp, arm1, arm2;
  enum gimplify_status ret;
  tree label_true, label_false, label_cont;
  bool have_then_clause_p, have_else_clause_p;
  gcond *cond_stmt;
  enum tree_code pred_code;
  gimple_seq seq = NULL;
  /* If this COND_EXPR has a value, copy the values into a temporary within
     the arms.  */
  if (!VOID_TYPE_P (type))
    {
      tree then_ = TREE_OPERAND (expr, 1), else_ = TREE_OPERAND (expr, 2);
      tree result;
      /* If either an rvalue is ok or we do not require an lvalue, create the
	 temporary.  But we cannot do that if the type is addressable.  */
      if (((fallback & fb_rvalue) || !(fallback & fb_lvalue))
	  && !TREE_ADDRESSABLE (type))
	{
	  if (gimplify_ctxp->allow_rhs_cond_expr
	      /* If either branch has side effects or could trap, it can't be
		 evaluated unconditionally.  */
	      && !TREE_SIDE_EFFECTS (then_)
	      && !generic_expr_could_trap_p (then_)
	      && !TREE_SIDE_EFFECTS (else_)
	      && !generic_expr_could_trap_p (else_))
	    return gimplify_pure_cond_expr (expr_p, pre_p);
	  tmp = create_tmp_var (type, "iftmp");
	  result = tmp;
	}
      /* Otherwise, only create and copy references to the values.  The
	 arms are rewritten to store their addresses and the result is
	 a dereference of the chosen pointer.  */
      else
	{
	  type = build_pointer_type (type);
	  if (!VOID_TYPE_P (TREE_TYPE (then_)))
	    then_ = build_fold_addr_expr_loc (loc, then_);
	  if (!VOID_TYPE_P (TREE_TYPE (else_)))
	    else_ = build_fold_addr_expr_loc (loc, else_);
	  expr
	    = build3 (COND_EXPR, type, TREE_OPERAND (expr, 0), then_, else_);
	  tmp = create_tmp_var (type, "iftmp");
	  result = build_simple_mem_ref_loc (loc, tmp);
	}
      /* Build the new then clause, `tmp = then_;'.  But don't build the
	 assignment if the value is void; in C++ it can be if it's a throw.  */
      if (!VOID_TYPE_P (TREE_TYPE (then_)))
	TREE_OPERAND (expr, 1) = build2 (MODIFY_EXPR, type, tmp, then_);
      /* Similarly, build the new else clause, `tmp = else_;'.  */
      if (!VOID_TYPE_P (TREE_TYPE (else_)))
	TREE_OPERAND (expr, 2) = build2 (MODIFY_EXPR, type, tmp, else_);
      TREE_TYPE (expr) = void_type_node;
      recalculate_side_effects (expr);
      /* Move the COND_EXPR to the prequeue.  */
      gimplify_stmt (&expr, pre_p);
      *expr_p = result;
      return GS_ALL_DONE;
    }
  /* Remove any COMPOUND_EXPR so the following cases will be caught.  */
  STRIP_TYPE_NOPS (TREE_OPERAND (expr, 0));
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == COMPOUND_EXPR)
    gimplify_compound_expr (&TREE_OPERAND (expr, 0), pre_p, true);
  /* Make sure the condition has BOOLEAN_TYPE.  */
  TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
  /* Break apart && and || conditions.  */
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR
      || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR)
    {
      expr = shortcut_cond_expr (expr);
      if (expr != *expr_p)
	{
	  *expr_p = expr;
	  /* We can't rely on gimplify_expr to re-gimplify the expanded
	     form properly, as cleanups might cause the target labels to be
	     wrapped in a TRY_FINALLY_EXPR.  To prevent that, we need to
	     set up a conditional context.  */
	  gimple_push_condition ();
	  gimplify_stmt (expr_p, &seq);
	  gimple_pop_condition (pre_p);
	  gimple_seq_add_seq (pre_p, seq);
	  return GS_ALL_DONE;
	}
    }
  /* Now do the normal gimplification.  */
  /* Gimplify condition.  */
  ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL, is_gimple_condexpr,
		       fb_rvalue);
  if (ret == GS_ERROR)
    return GS_ERROR;
  gcc_assert (TREE_OPERAND (expr, 0) != NULL_TREE);
  gimple_push_condition ();
  have_then_clause_p = have_else_clause_p = false;
  /* If the then-arm is already a plain goto to a local label, reuse that
     label as the true target instead of creating a new one.  */
  if (TREE_OPERAND (expr, 1) != NULL
      && TREE_CODE (TREE_OPERAND (expr, 1)) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (TREE_OPERAND (expr, 1))) == LABEL_DECL
      && (DECL_CONTEXT (GOTO_DESTINATION (TREE_OPERAND (expr, 1)))
	  == current_function_decl)
      /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
	 have different locations, otherwise we end up with incorrect
	 location information on the branches.  */
      && (optimize
	  || !EXPR_HAS_LOCATION (expr)
	  || !EXPR_HAS_LOCATION (TREE_OPERAND (expr, 1))
	  || EXPR_LOCATION (expr) == EXPR_LOCATION (TREE_OPERAND (expr, 1))))
    {
      label_true = GOTO_DESTINATION (TREE_OPERAND (expr, 1));
      have_then_clause_p = true;
    }
  else
    label_true = create_artificial_label (UNKNOWN_LOCATION);
  /* Likewise for the else-arm and the false target.  */
  if (TREE_OPERAND (expr, 2) != NULL
      && TREE_CODE (TREE_OPERAND (expr, 2)) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (TREE_OPERAND (expr, 2))) == LABEL_DECL
      && (DECL_CONTEXT (GOTO_DESTINATION (TREE_OPERAND (expr, 2)))
	  == current_function_decl)
      /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
	 have different locations, otherwise we end up with incorrect
	 location information on the branches.  */
      && (optimize
	  || !EXPR_HAS_LOCATION (expr)
	  || !EXPR_HAS_LOCATION (TREE_OPERAND (expr, 2))
	  || EXPR_LOCATION (expr) == EXPR_LOCATION (TREE_OPERAND (expr, 2))))
    {
      label_false = GOTO_DESTINATION (TREE_OPERAND (expr, 2));
      have_else_clause_p = true;
    }
  else
    label_false = create_artificial_label (UNKNOWN_LOCATION);
  gimple_cond_get_ops_from_tree (COND_EXPR_COND (expr), &pred_code, &arm1,
				 &arm2);
  cond_stmt = gimple_build_cond (pred_code, arm1, arm2, label_true,
				 label_false);
  gimple_set_no_warning (cond_stmt, TREE_NO_WARNING (COND_EXPR_COND (expr)));
  gimplify_seq_add_stmt (&seq, cond_stmt);
  gimple_stmt_iterator gsi = gsi_last (seq);
  maybe_fold_stmt (&gsi);
  label_cont = NULL_TREE;
  if (!have_then_clause_p)
    {
      /* For if (...) {} else { code; } put label_true after
	 the else block.  */
      if (TREE_OPERAND (expr, 1) == NULL_TREE
	  && !have_else_clause_p
	  && TREE_OPERAND (expr, 2) != NULL_TREE)
	label_cont = label_true;
      else
	{
	  gimplify_seq_add_stmt (&seq, gimple_build_label (label_true));
	  have_then_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 1), &seq);
	  /* For if (...) { code; } else {} or
	     if (...) { code; } else goto label; or
	     if (...) { code; return; } else { ... }
	     label_cont isn't needed.  */
	  if (!have_else_clause_p
	      && TREE_OPERAND (expr, 2) != NULL_TREE
	      && gimple_seq_may_fallthru (seq))
	    {
	      gimple *g;
	      label_cont = create_artificial_label (UNKNOWN_LOCATION);
	      g = gimple_build_goto (label_cont);
	      /* GIMPLE_COND's are very low level; they have embedded
		 gotos.  This particular embedded goto should not be marked
		 with the location of the original COND_EXPR, as it would
		 correspond to the COND_EXPR's condition, not the ELSE or the
		 THEN arms.  To avoid marking it with the wrong location, flag
		 it as "no location".  */
	      gimple_set_do_not_emit_location (g);
	      gimplify_seq_add_stmt (&seq, g);
	    }
	}
    }
  if (!have_else_clause_p)
    {
      gimplify_seq_add_stmt (&seq, gimple_build_label (label_false));
      have_else_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 2), &seq);
    }
  if (label_cont)
    gimplify_seq_add_stmt (&seq, gimple_build_label (label_cont));
  gimple_pop_condition (pre_p);
  gimple_seq_add_seq (pre_p, seq);
  if (ret == GS_ERROR)
    ; /* Do nothing.  */
  else if (have_then_clause_p || have_else_clause_p)
    ret = GS_ALL_DONE;
  else
    {
      /* Both arms are empty; replace the COND_EXPR with its predicate.  */
      expr = TREE_OPERAND (expr, 0);
      gimplify_stmt (&expr, pre_p);
    }
  *expr_p = NULL;
  return ret;
}
/* Prepare the node pointed to by EXPR_P, an is_gimple_addressable expression,
   to be marked addressable.
   We cannot rely on such an expression being directly markable if a temporary
   has been created by the gimplification.  In this case, we create another
   temporary and initialize it with a copy, which will become a store after we
   mark it addressable.  This can happen if the front-end passed us something
   that it could not mark addressable yet, like a Fortran pass-by-reference
   parameter (int) floatvar.  Side effects of the copy go to SEQ_P.  */
static void
prepare_gimple_addressable (tree *expr_p, gimple_seq *seq_p)
{
  /* Strip component/array references down to the base object, which is
     what actually has to live in memory.  */
  while (handled_component_p (*expr_p))
    expr_p = &TREE_OPERAND (*expr_p, 0);
  if (is_gimple_reg (*expr_p))
    {
      /* Do not allow an SSA name as the temporary.  */
      tree var = get_initialized_tmp_var (*expr_p, seq_p, NULL, false);
      DECL_GIMPLE_REG_P (var) = 0;
      *expr_p = var;
    }
}
/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memcpy.

   *EXPR_P is the MODIFY_EXPR being lowered and SIZE the number of bytes
   to copy.  Set-up statements are appended to SEQ_P.  If WANT_VALUE is
   true, *EXPR_P is replaced by a dereference of memcpy's result (the
   destination pointer); otherwise *EXPR_P is cleared.  Always returns
   GS_ALL_DONE.  */

static enum gimplify_status
gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value,
				gimple_seq *seq_p)
{
  tree t, to, to_ptr, from, from_ptr;
  gcall *gs;
  location_t loc = EXPR_LOCATION (*expr_p);

  to = TREE_OPERAND (*expr_p, 0);
  from = TREE_OPERAND (*expr_p, 1);

  /* Mark the RHS addressable.  Beware that it may not be possible to do so
     directly if a temporary has been created by the gimplification.  */
  prepare_gimple_addressable (&from, seq_p);

  /* Gimplify the source address before the destination address so the
     statements land in SEQ_P in that order.  */
  mark_addressable (from);
  from_ptr = build_fold_addr_expr_loc (loc, from);
  gimplify_arg (&from_ptr, seq_p, loc);

  mark_addressable (to);
  to_ptr = build_fold_addr_expr_loc (loc, to);
  gimplify_arg (&to_ptr, seq_p, loc);

  t = builtin_decl_implicit (BUILT_IN_MEMCPY);

  gs = gimple_build_call (t, 3, to_ptr, from_ptr, size);

  if (want_value)
    {
      /* tmp = memcpy() */
      t = create_tmp_var (TREE_TYPE (to_ptr));
      gimple_call_set_lhs (gs, t);
      gimplify_seq_add_stmt (seq_p, gs);

      /* memcpy returns the destination pointer; dereference it to yield
	 the stored value.  */
      *expr_p = build_simple_mem_ref (t);
      return GS_ALL_DONE;
    }

  gimplify_seq_add_stmt (seq_p, gs);
  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memset.  In this case we know that the RHS is
   a CONSTRUCTOR with an empty element list, i.e. zero-initialization
   of SIZE bytes.

   Set-up statements are appended to SEQ_P.  If WANT_VALUE is true,
   *EXPR_P is replaced by a dereference of memset's result; otherwise
   *EXPR_P is cleared.  Always returns GS_ALL_DONE.  */

static enum gimplify_status
gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value,
				gimple_seq *seq_p)
{
  tree t, from, to, to_ptr;
  gcall *gs;
  location_t loc = EXPR_LOCATION (*expr_p);

  /* Assert our assumptions, to abort instead of producing wrong code
     silently if they are not met.  Beware that the RHS CONSTRUCTOR might
     not be immediately exposed.  */
  from = TREE_OPERAND (*expr_p, 1);
  if (TREE_CODE (from) == WITH_SIZE_EXPR)
    from = TREE_OPERAND (from, 0);

  gcc_assert (TREE_CODE (from) == CONSTRUCTOR
	      && vec_safe_is_empty (CONSTRUCTOR_ELTS (from)));

  /* Now proceed.  */
  to = TREE_OPERAND (*expr_p, 0);

  to_ptr = build_fold_addr_expr_loc (loc, to);
  gimplify_arg (&to_ptr, seq_p, loc);
  t = builtin_decl_implicit (BUILT_IN_MEMSET);

  gs = gimple_build_call (t, 3, to_ptr, integer_zero_node, size);

  if (want_value)
    {
      /* tmp = memset() */
      t = create_tmp_var (TREE_TYPE (to_ptr));
      gimple_call_set_lhs (gs, t);
      gimplify_seq_add_stmt (seq_p, gs);

      /* memset returns the destination pointer; dereference it to yield
	 the (zeroed) object.  */
      *expr_p = build1 (INDIRECT_REF, TREE_TYPE (to), t);
      return GS_ALL_DONE;
    }

  gimplify_seq_add_stmt (seq_p, gs);
  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* A subroutine of gimplify_init_ctor_preeval.  Called via walk_tree,
   determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an
   assignment.  Return non-null if we detect a potential overlap.  */

/* Data passed via walk_tree's DATA argument to
   gimplify_init_ctor_preeval_1, describing the lhs being assigned.  */

struct gimplify_init_ctor_preeval_data
{
  /* The base decl of the lhs object.  May be NULL, in which case we
     have to assume the lhs is indirect.  */
  tree lhs_base_decl;

  /* The alias set of the lhs object.  */
  alias_set_type lhs_alias_set;
};
/* walk_tree callback implementing the overlap test described above.
   *TP is the subtree being examined, XDATA points to a
   gimplify_init_ctor_preeval_data describing the lhs.  Returns the
   offending tree (stopping the walk) on potential overlap, NULL
   otherwise.  */

static tree
gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata)
{
  struct gimplify_init_ctor_preeval_data *data
    = (struct gimplify_init_ctor_preeval_data *) xdata;
  tree t = *tp;

  /* If we find the base object, obviously we have overlap.  */
  if (data->lhs_base_decl == t)
    return t;

  /* If the constructor component is indirect, determine if we have a
     potential overlap with the lhs.  The only bits of information we
     have to go on at this point are addressability and alias sets.  */
  if ((INDIRECT_REF_P (t)
       || TREE_CODE (t) == MEM_REF)
      && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
      && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (t)))
    return t;

  /* If the constructor component is a call, determine if it can hide a
     potential overlap with the lhs through an INDIRECT_REF like above.
     ??? Ugh - this is completely broken.  In fact this whole analysis
     doesn't look conservative.  */
  if (TREE_CODE (t) == CALL_EXPR)
    {
      tree type, fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (t)));

      /* Check every pointer parameter type of the callee for a possible
	 conflict with the lhs alias set.  */
      for (type = TYPE_ARG_TYPES (fntype); type; type = TREE_CHAIN (type))
	if (POINTER_TYPE_P (TREE_VALUE (type))
	    && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
	    && alias_sets_conflict_p (data->lhs_alias_set,
				      get_alias_set
					(TREE_TYPE (TREE_VALUE (type)))))
	  return t;
    }

  /* Types and decls cannot themselves reference the lhs; prune the walk.  */
  if (IS_TYPE_OR_DECL_P (t))
    *walk_subtrees = 0;
  return NULL;
}
/* A subroutine of gimplify_init_constructor.  Pre-evaluate EXPR,
   force values that overlap with the lhs (as described by *DATA)
   into temporaries.

   Gimplification statements go to PRE_P/POST_P.  On gimplification
   error, *EXPR_P is set to NULL (a caller-visible sentinel).  */

static void
gimplify_init_ctor_preeval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			    struct gimplify_init_ctor_preeval_data *data)
{
  enum gimplify_status one;

  /* If the value is constant, then there's nothing to pre-evaluate.  */
  if (TREE_CONSTANT (*expr_p))
    {
      /* Ensure it does not have side effects, it might contain a reference to
	 the object we're initializing.  */
      gcc_assert (!TREE_SIDE_EFFECTS (*expr_p));
      return;
    }

  /* If the type has non-trivial constructors, we can't pre-evaluate.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p)))
    return;

  /* Recurse for nested constructors.  */
  if (TREE_CODE (*expr_p) == CONSTRUCTOR)
    {
      unsigned HOST_WIDE_INT ix;
      constructor_elt *ce;
      vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (*expr_p);

      FOR_EACH_VEC_SAFE_ELT (v, ix, ce)
	gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data);

      return;
    }

  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (expr_p);

  /* Gimplify the constructor element to something appropriate for the rhs
     of a MODIFY_EXPR.  Given that we know the LHS is an aggregate, we know
     the gimplifier will consider this a store to memory.  Doing this
     gimplification now means that we won't have to deal with complicated
     language-specific trees, nor trees like SAVE_EXPR that can induce
     exponential search behavior.  */
  one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue);
  if (one == GS_ERROR)
    {
      *expr_p = NULL;
      return;
    }

  /* If we gimplified to a bare decl, we can be sure that it doesn't overlap
     with the lhs, since "a = { .x=a }" doesn't make sense.  This will
     always be true for all scalars, since is_gimple_mem_rhs insists on a
     temporary variable for them.  */
  if (DECL_P (*expr_p))
    return;

  /* If this is of variable size, we have no choice but to assume it doesn't
     overlap since we can't make a temporary for it.  */
  if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST)
    return;

  /* Otherwise, we must search for overlap ...  */
  if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL))
    return;

  /* ... and if found, force the value into a temporary.  */
  *expr_p = get_formal_tmp_var (*expr_p, pre_p);
}
/* A subroutine of gimplify_init_ctor_eval.  Create a loop for
   a RANGE_EXPR in a CONSTRUCTOR for an array.

      var = lower;
    loop_entry:
      object[var] = value;
      if (var == upper)
	goto loop_exit;
      var = var + 1;
      goto loop_entry;
    loop_exit:

   We increment var _after_ the loop exit check because we might otherwise
   fail if upper == TYPE_MAX_VALUE (type for upper).

   Note that we never have to deal with SAVE_EXPRs here, because this has
   already been taken care of for us, in gimplify_init_ctor_preeval().  */

static void gimplify_init_ctor_eval (tree, vec<constructor_elt, va_gc> *,
				     gimple_seq *, bool);

static void
gimplify_init_ctor_eval_range (tree object, tree lower, tree upper,
			       tree value, tree array_elt_type,
			       gimple_seq *pre_p, bool cleared)
{
  tree loop_entry_label, loop_exit_label, fall_thru_label;
  tree var, var_type, cref, tmp;

  loop_entry_label = create_artificial_label (UNKNOWN_LOCATION);
  loop_exit_label = create_artificial_label (UNKNOWN_LOCATION);
  fall_thru_label = create_artificial_label (UNKNOWN_LOCATION);

  /* Create and initialize the index variable.  */
  var_type = TREE_TYPE (upper);
  var = create_tmp_var (var_type);
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, lower));

  /* Add the loop entry label.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_entry_label));

  /* Build the reference.  */
  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
		 var, NULL_TREE, NULL_TREE);

  /* If we are a constructor, just call gimplify_init_ctor_eval to do
     the store.  Otherwise just assign value to the reference.  */

  if (TREE_CODE (value) == CONSTRUCTOR)
    /* NB we might have to call ourself recursively through
       gimplify_init_ctor_eval if the value is a constructor.  */
    gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
			     pre_p, cleared);
  else
    gimplify_seq_add_stmt (pre_p, gimple_build_assign (cref, value));

  /* We exit the loop when the index var is equal to the upper bound.  */
  gimplify_seq_add_stmt (pre_p,
			 gimple_build_cond (EQ_EXPR, var, upper,
					    loop_exit_label, fall_thru_label));

  gimplify_seq_add_stmt (pre_p, gimple_build_label (fall_thru_label));

  /* Otherwise, increment the index var...  */
  tmp = build2 (PLUS_EXPR, var_type, var,
		fold_convert (var_type, integer_one_node));
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, tmp));

  /* ...and jump back to the loop entry.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_goto (loop_entry_label));

  /* Add the loop exit label.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_exit_label));
}
/* Return true if FDECL is accessing a field that is zero sized.  */

static bool
zero_sized_field_decl (const_tree fdecl)
{
  return (TREE_CODE (fdecl) == FIELD_DECL
	  && DECL_SIZE (fdecl)
	  && integer_zerop (DECL_SIZE (fdecl)));
}
/* Return true if TYPE is zero sized.  */

static bool
zero_sized_type (const_tree type)
{
  return (AGGREGATE_TYPE_P (type)
	  && TYPE_SIZE (type)
	  && integer_zerop (TYPE_SIZE (type)));
}
/* A subroutine of gimplify_init_constructor.  Generate individual
   MODIFY_EXPRs for a CONSTRUCTOR.  OBJECT is the LHS against which the
   assignments should happen.  ELTS is the CONSTRUCTOR_ELTS of the
   CONSTRUCTOR.  CLEARED is true if the entire LHS object has been
   zeroed first (so zero-valued elements may be skipped).  Generated
   statements are appended to PRE_P.  */

static void
gimplify_init_ctor_eval (tree object, vec<constructor_elt, va_gc> *elts,
			 gimple_seq *pre_p, bool cleared)
{
  tree array_elt_type = NULL;
  unsigned HOST_WIDE_INT ix;
  tree purpose, value;

  if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE)
    array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object)));

  FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value)
    {
      tree cref;

      /* NULL values are created above for gimplification errors.  */
      if (value == NULL)
	continue;

      /* A zero value needs no store when the object was block-cleared.  */
      if (cleared && initializer_zerop (value))
	continue;

      /* ??? Here's to hoping the front end fills in all of the indices,
	 so we don't have to figure out what's missing ourselves.  */
      gcc_assert (purpose);

      /* Skip zero-sized fields, unless value has side-effects.  This can
	 happen with calls to functions returning a zero-sized type, which
	 we shouldn't discard.  As a number of downstream passes don't
	 expect sets of zero-sized fields, we rely on the gimplification of
	 the MODIFY_EXPR we make below to drop the assignment statement.  */
      if (! TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose))
	continue;

      /* If we have a RANGE_EXPR, we have to build a loop to assign the
	 whole range.  */
      if (TREE_CODE (purpose) == RANGE_EXPR)
	{
	  tree lower = TREE_OPERAND (purpose, 0);
	  tree upper = TREE_OPERAND (purpose, 1);

	  /* If the lower bound is equal to upper, just treat it as if
	     upper was the index.  */
	  if (simple_cst_equal (lower, upper))
	    purpose = upper;
	  else
	    {
	      gimplify_init_ctor_eval_range (object, lower, upper, value,
					     array_elt_type, pre_p, cleared);
	      continue;
	    }
	}

      if (array_elt_type)
	{
	  /* Do not use bitsizetype for ARRAY_REF indices.  */
	  if (TYPE_DOMAIN (TREE_TYPE (object)))
	    purpose
	      = fold_convert (TREE_TYPE (TYPE_DOMAIN (TREE_TYPE (object))),
			      purpose);
	  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
			 purpose, NULL_TREE, NULL_TREE);
	}
      else
	{
	  gcc_assert (TREE_CODE (purpose) == FIELD_DECL);
	  cref = build3 (COMPONENT_REF, TREE_TYPE (purpose),
			 unshare_expr (object), purpose, NULL_TREE);
	}

      /* Nested aggregate constructors are flattened recursively, except
	 for vectors, which stay as CONSTRUCTOR nodes through gimple.  */
      if (TREE_CODE (value) == CONSTRUCTOR
	  && TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE)
	gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
				 pre_p, cleared);
      else
	{
	  tree init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value);
	  gimplify_and_add (init, pre_p);
	  ggc_free (init);
	}
    }
}
/* Return the appropriate RHS predicate for this LHS.  */

gimple_predicate
rhs_predicate_for (tree lhs)
{
  /* A register LHS accepts register rhs values or calls; anything else
     is a store to memory.  */
  return (is_gimple_reg (lhs)
	  ? is_gimple_reg_rhs_or_call
	  : is_gimple_mem_rhs_or_call);
}
/* Return the initial guess for an appropriate RHS predicate for this LHS,
   before the LHS has been gimplified.  */

static gimple_predicate
initial_rhs_predicate_for (tree lhs)
{
  /* Judge by the LHS type alone, since the LHS itself is not yet in
     gimple form.  */
  return (is_gimple_reg_type (TREE_TYPE (lhs))
	  ? is_gimple_reg_rhs_or_call
	  : is_gimple_mem_rhs_or_call);
}
/* Gimplify a C99 compound literal expression.  This just means adding
   the DECL_EXPR before the current statement and using its anonymous
   decl instead.

   *EXPR_P is the COMPOUND_LITERAL_EXPR; the DECL_EXPR is gimplified into
   PRE_P.  GIMPLE_TEST_F and FALLBACK describe what the caller accepts,
   letting us substitute the literal's initializer directly when no lvalue
   is needed.  Returns GS_OK.  */

static enum gimplify_status
gimplify_compound_literal_expr (tree *expr_p, gimple_seq *pre_p,
				bool (*gimple_test_f) (tree),
				fallback_t fallback)
{
  tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (*expr_p);
  tree decl = DECL_EXPR_DECL (decl_s);
  tree init = DECL_INITIAL (decl);

  /* Mark the decl as addressable if the compound literal
     expression is addressable now, otherwise it is marked too late
     after we gimplify the initialization expression.  */
  if (TREE_ADDRESSABLE (*expr_p))
    TREE_ADDRESSABLE (decl) = 1;
  /* Otherwise, if we don't need an lvalue and have a literal directly
     substitute it.  Check if it matches the gimple predicate, as
     otherwise we'd generate a new temporary, and we can as well just
     use the decl we already have.  */
  else if (!TREE_ADDRESSABLE (decl)
	   && init
	   && (fallback & fb_lvalue) == 0
	   && gimple_test_f (init))
    {
      *expr_p = init;
      return GS_OK;
    }

  /* Preliminarily mark non-addressed complex variables as eligible
     for promotion to gimple registers.  We'll transform their uses
     as we find them.  */
  if ((TREE_CODE (TREE_TYPE (decl)) == COMPLEX_TYPE
       || TREE_CODE (TREE_TYPE (decl)) == VECTOR_TYPE)
      && !TREE_THIS_VOLATILE (decl)
      && !needs_to_live_in_memory (decl))
    DECL_GIMPLE_REG_P (decl) = 1;

  /* If the decl is not addressable, then it is being used in some
     expression or on the right hand side of a statement, and it can
     be put into a readonly data section.  */
  if (!TREE_ADDRESSABLE (decl) && (fallback & fb_lvalue) == 0)
    TREE_READONLY (decl) = 1;

  /* This decl isn't mentioned in the enclosing block, so add it to the
     list of temps.  FIXME it seems a bit of a kludge to say that
     anonymous artificial vars aren't pushed, but everything else is.  */
  if (DECL_NAME (decl) == NULL_TREE && !DECL_SEEN_IN_BIND_EXPR_P (decl))
    gimple_add_tmp_var (decl);

  gimplify_and_add (decl_s, pre_p);
  *expr_p = decl;
  return GS_OK;
}
/* Optimize embedded COMPOUND_LITERAL_EXPRs within a CONSTRUCTOR,
   return a new CONSTRUCTOR if something changed.

   The original CONSTRUCTOR is never modified: the node and its element
   vector are copied lazily (copy-on-write) the first time a replacement
   value is found.  */

static tree
optimize_compound_literals_in_ctor (tree orig_ctor)
{
  tree ctor = orig_ctor;
  vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (ctor);
  unsigned int idx, num = vec_safe_length (elts);

  for (idx = 0; idx < num; idx++)
    {
      tree value = (*elts)[idx].value;
      tree newval = value;
      if (TREE_CODE (value) == CONSTRUCTOR)
	newval = optimize_compound_literals_in_ctor (value);
      else if (TREE_CODE (value) == COMPOUND_LITERAL_EXPR)
	{
	  tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (value);
	  tree decl = DECL_EXPR_DECL (decl_s);
	  tree init = DECL_INITIAL (decl);

	  /* Only substitute the literal's initializer when neither the
	     literal nor its decl is addressable, so no one can observe
	     the decl's storage.  */
	  if (!TREE_ADDRESSABLE (value)
	      && !TREE_ADDRESSABLE (decl)
	      && init
	      && TREE_CODE (init) == CONSTRUCTOR)
	    newval = optimize_compound_literals_in_ctor (init);
	}
      if (newval == value)
	continue;

      /* First change: make a private copy of the node and its element
	 vector before writing into them.  */
      if (ctor == orig_ctor)
	{
	  ctor = copy_node (orig_ctor);
	  CONSTRUCTOR_ELTS (ctor) = vec_safe_copy (elts);
	  elts = CONSTRUCTOR_ELTS (ctor);
	}
      (*elts)[idx].value = newval;
    }
  return ctor;
}
/* A subroutine of gimplify_modify_expr.  Break out elements of a
   CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs.

   Note that we still need to clear any elements that don't have explicit
   initializers, so if not all elements are initialized we keep the
   original MODIFY_EXPR, we just remove all of the constructor elements.

   If NOTIFY_TEMP_CREATION is true, do not gimplify, just return
   GS_ERROR if we would have to create a temporary when gimplifying
   this constructor.  Otherwise, return GS_OK.

   If NOTIFY_TEMP_CREATION is false, just do the gimplification.

   *EXPR_P is the MODIFY_EXPR/INIT_EXPR whose rhs is a CONSTRUCTOR;
   gimplified statements go to PRE_P/POST_P.  If WANT_VALUE, *EXPR_P
   becomes the initialized object, otherwise it is cleared.  */

static enum gimplify_status
gimplify_init_constructor (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			   bool want_value, bool notify_temp_creation)
{
  tree object, ctor, type;
  enum gimplify_status ret;
  vec<constructor_elt, va_gc> *elts;

  gcc_assert (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == CONSTRUCTOR);

  if (!notify_temp_creation)
    {
      ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			   is_gimple_lvalue, fb_lvalue);
      if (ret == GS_ERROR)
	return ret;
    }

  object = TREE_OPERAND (*expr_p, 0);
  ctor = TREE_OPERAND (*expr_p, 1)
    = optimize_compound_literals_in_ctor (TREE_OPERAND (*expr_p, 1));
  type = TREE_TYPE (ctor);
  elts = CONSTRUCTOR_ELTS (ctor);
  ret = GS_ALL_DONE;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ARRAY_TYPE:
      {
	struct gimplify_init_ctor_preeval_data preeval_data;
	HOST_WIDE_INT num_ctor_elements, num_nonzero_elements;
	bool cleared, complete_p, valid_const_initializer;

	/* Aggregate types must lower constructors to initialization of
	   individual elements.  The exception is that a CONSTRUCTOR node
	   with no elements indicates zero-initialization of the whole.  */
	if (vec_safe_is_empty (elts))
	  {
	    if (notify_temp_creation)
	      return GS_OK;
	    break;
	  }

	/* Fetch information about the constructor to direct later processing.
	   We might want to make static versions of it in various cases, and
	   can only do so if it is known to be a valid constant initializer.  */
	valid_const_initializer
	  = categorize_ctor_elements (ctor, &num_nonzero_elements,
				      &num_ctor_elements, &complete_p);

	/* If a const aggregate variable is being initialized, then it
	   should never be a loss to promote the variable to be static.  */
	if (valid_const_initializer
	    && num_nonzero_elements > 1
	    && TREE_READONLY (object)
	    && VAR_P (object)
	    && (flag_merge_constants >= 2 || !TREE_ADDRESSABLE (object)))
	  {
	    if (notify_temp_creation)
	      return GS_ERROR;
	    DECL_INITIAL (object) = ctor;
	    TREE_STATIC (object) = 1;
	    if (!DECL_NAME (object))
	      DECL_NAME (object) = create_tmp_var_name ("C");
	    walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);

	    /* ??? C++ doesn't automatically append a .<number> to the
	       assembler name, and even when it does, it looks at FE private
	       data structures to figure out what that number should be,
	       which are not set for this variable.  I suppose this is
	       important for local statics for inline functions, which aren't
	       "local" in the object file sense.  So in order to get a unique
	       TU-local symbol, we must invoke the lhd version now.  */
	    lhd_set_decl_assembler_name (object);

	    *expr_p = NULL_TREE;
	    break;
	  }

	/* If there are "lots" of initialized elements, even discounting
	   those that are not address constants (and thus *must* be
	   computed at runtime), then partition the constructor into
	   constant and non-constant parts.  Block copy the constant
	   parts in, then generate code for the non-constant parts.  */
	/* TODO.  There's code in cp/typeck.c to do this.  */

	/* Decide whether the whole object should be block-cleared before
	   individual element stores are emitted.  */
	if (int_size_in_bytes (TREE_TYPE (ctor)) < 0)
	  /* store_constructor will ignore the clearing of variable-sized
	     objects.  Initializers for such objects must explicitly set
	     every field that needs to be set.  */
	  cleared = false;
	else if (!complete_p && !CONSTRUCTOR_NO_CLEARING (ctor))
	  /* If the constructor isn't complete, clear the whole object
	     beforehand, unless CONSTRUCTOR_NO_CLEARING is set on it.

	     ??? This ought not to be needed.  For any element not present
	     in the initializer, we should simply set them to zero.  Except
	     we'd need to *find* the elements that are not present, and that
	     requires trickery to avoid quadratic compile-time behavior in
	     large cases or excessive memory use in small cases.  */
	  cleared = true;
	else if (num_ctor_elements - num_nonzero_elements
		 > CLEAR_RATIO (optimize_function_for_speed_p (cfun))
		 && num_nonzero_elements < num_ctor_elements / 4)
	  /* If there are "lots" of zeros, it's more efficient to clear
	     the memory and then set the nonzero elements.  */
	  cleared = true;
	else
	  cleared = false;

	/* If there are "lots" of initialized elements, and all of them
	   are valid address constants, then the entire initializer can
	   be dropped to memory, and then memcpy'd out.  Don't do this
	   for sparse arrays, though, as it's more efficient to follow
	   the standard CONSTRUCTOR behavior of memset followed by
	   individual element initialization.  Also don't do this for small
	   all-zero initializers (which aren't big enough to merit
	   clearing), and don't try to make bitwise copies of
	   TREE_ADDRESSABLE types.

	   We cannot apply such transformation when compiling chkp static
	   initializer because creation of initializer image in the memory
	   will require static initialization of bounds for it.  It should
	   result in another gimplification of similar initializer and we
	   may fall into infinite loop.  */
	if (valid_const_initializer
	    && !(cleared || num_nonzero_elements == 0)
	    && !TREE_ADDRESSABLE (type)
	    && (!current_function_decl
		|| !lookup_attribute ("chkp ctor",
				      DECL_ATTRIBUTES (current_function_decl))))
	  {
	    HOST_WIDE_INT size = int_size_in_bytes (type);
	    unsigned int align;

	    /* ??? We can still get unbounded array types, at least
	       from the C++ front end.  This seems wrong, but attempt
	       to work around it for now.  */
	    if (size < 0)
	      {
		size = int_size_in_bytes (TREE_TYPE (object));
		if (size >= 0)
		  TREE_TYPE (ctor) = type = TREE_TYPE (object);
	      }

	    /* Find the maximum alignment we can assume for the object.  */
	    /* ??? Make use of DECL_OFFSET_ALIGN.  */
	    if (DECL_P (object))
	      align = DECL_ALIGN (object);
	    else
	      align = TYPE_ALIGN (type);

	    /* Do a block move either if the size is so small as to make
	       each individual move a sub-unit move on average, or if it
	       is so large as to make individual moves inefficient.  */
	    if (size > 0
		&& num_nonzero_elements > 1
		&& (size < num_nonzero_elements
		    || !can_move_by_pieces (size, align)))
	      {
		if (notify_temp_creation)
		  return GS_ERROR;

		walk_tree (&ctor, force_labels_r, NULL, NULL);
		ctor = tree_output_constant_def (ctor);
		if (!useless_type_conversion_p (type, TREE_TYPE (ctor)))
		  ctor = build1 (VIEW_CONVERT_EXPR, type, ctor);
		TREE_OPERAND (*expr_p, 1) = ctor;

		/* This is no longer an assignment of a CONSTRUCTOR, but
		   we still may have processing to do on the LHS.  So
		   pretend we didn't do anything here to let that happen.  */
		return GS_UNHANDLED;
	      }
	  }

	/* If the target is volatile, we have non-zero elements and more than
	   one field to assign, initialize the target from a temporary.  */
	if (TREE_THIS_VOLATILE (object)
	    && !TREE_ADDRESSABLE (type)
	    && num_nonzero_elements > 0
	    && vec_safe_length (elts) > 1)
	  {
	    tree temp = create_tmp_var (TYPE_MAIN_VARIANT (type));
	    TREE_OPERAND (*expr_p, 0) = temp;
	    *expr_p = build2 (COMPOUND_EXPR, TREE_TYPE (*expr_p),
			      *expr_p,
			      build2 (MODIFY_EXPR, void_type_node,
				      object, temp));
	    return GS_OK;
	  }

	if (notify_temp_creation)
	  return GS_OK;

	/* If there are nonzero elements and if needed, pre-evaluate to capture
	   elements overlapping with the lhs into temporaries.  We must do this
	   before clearing to fetch the values before they are zeroed-out.  */
	if (num_nonzero_elements > 0 && TREE_CODE (*expr_p) != INIT_EXPR)
	  {
	    preeval_data.lhs_base_decl = get_base_address (object);
	    if (!DECL_P (preeval_data.lhs_base_decl))
	      preeval_data.lhs_base_decl = NULL;
	    preeval_data.lhs_alias_set = get_alias_set (object);

	    gimplify_init_ctor_preeval (&TREE_OPERAND (*expr_p, 1),
					pre_p, post_p, &preeval_data);
	  }

	bool ctor_has_side_effects_p
	  = TREE_SIDE_EFFECTS (TREE_OPERAND (*expr_p, 1));

	if (cleared)
	  {
	    /* Zap the CONSTRUCTOR element list, which simplifies this case.
	       Note that we still have to gimplify, in order to handle the
	       case of variable sized types.  Avoid shared tree structures.  */
	    CONSTRUCTOR_ELTS (ctor) = NULL;
	    TREE_SIDE_EFFECTS (ctor) = 0;
	    object = unshare_expr (object);
	    gimplify_stmt (expr_p, pre_p);
	  }

	/* If we have not block cleared the object, or if there are nonzero
	   elements in the constructor, or if the constructor has side effects,
	   add assignments to the individual scalar fields of the object.  */
	if (!cleared
	    || num_nonzero_elements > 0
	    || ctor_has_side_effects_p)
	  gimplify_init_ctor_eval (object, elts, pre_p, cleared);

	*expr_p = NULL_TREE;
      }
      break;

    case COMPLEX_TYPE:
      {
	tree r, i;

	if (notify_temp_creation)
	  return GS_OK;

	/* Extract the real and imaginary parts out of the ctor.  */
	gcc_assert (elts->length () == 2);
	r = (*elts)[0].value;
	i = (*elts)[1].value;
	if (r == NULL || i == NULL)
	  {
	    tree zero = build_zero_cst (TREE_TYPE (type));
	    if (r == NULL)
	      r = zero;
	    if (i == NULL)
	      i = zero;
	  }

	/* Complex types have either COMPLEX_CST or COMPLEX_EXPR to
	   represent creation of a complex value.  */
	if (TREE_CONSTANT (r) && TREE_CONSTANT (i))
	  {
	    ctor = build_complex (type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	  }
	else
	  {
	    ctor = build2 (COMPLEX_EXPR, type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	    ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1),
				 pre_p,
				 post_p,
				 rhs_predicate_for (TREE_OPERAND (*expr_p, 0)),
				 fb_rvalue);
	  }
      }
      break;

    case VECTOR_TYPE:
      {
	unsigned HOST_WIDE_INT ix;
	constructor_elt *ce;

	if (notify_temp_creation)
	  return GS_OK;

	/* Go ahead and simplify constant constructors to VECTOR_CST.  */
	if (TREE_CONSTANT (ctor))
	  {
	    bool constant_p = true;
	    tree value;

	    /* Even when ctor is constant, it might contain non-*_CST
	       elements, such as addresses or trapping values like
	       1.0/0.0 - 1.0/0.0.  Such expressions don't belong
	       in VECTOR_CST nodes.  */
	    FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
	      if (!CONSTANT_CLASS_P (value))
		{
		  constant_p = false;
		  break;
		}

	    if (constant_p)
	      {
		TREE_OPERAND (*expr_p, 1) = build_vector_from_ctor (type, elts);
		break;
	      }

	    TREE_CONSTANT (ctor) = 0;
	  }

	/* Vector types use CONSTRUCTOR all the way through gimple
	   compilation as a general initializer.  */
	FOR_EACH_VEC_SAFE_ELT (elts, ix, ce)
	  {
	    enum gimplify_status tret;
	    tret = gimplify_expr (&ce->value, pre_p, post_p, is_gimple_val,
				  fb_rvalue);
	    if (tret == GS_ERROR)
	      ret = GS_ERROR;
	    else if (TREE_STATIC (ctor)
		     && !initializer_constant_valid_p (ce->value,
						       TREE_TYPE (ce->value)))
	      TREE_STATIC (ctor) = 0;
	  }
	if (!is_gimple_reg (TREE_OPERAND (*expr_p, 0)))
	  TREE_OPERAND (*expr_p, 1) = get_formal_tmp_var (ctor, pre_p);
      }
      break;

    default:
      /* So how did we get a CONSTRUCTOR for a scalar type?  */
      gcc_unreachable ();
    }

  if (ret == GS_ERROR)
    return GS_ERROR;
  /* If we have gimplified both sides of the initializer but have
     not emitted an assignment, do so now.  */
  if (*expr_p)
    {
      tree lhs = TREE_OPERAND (*expr_p, 0);
      tree rhs = TREE_OPERAND (*expr_p, 1);
      if (want_value && object == lhs)
	lhs = unshare_expr (lhs);
      gassign *init = gimple_build_assign (lhs, rhs);
      gimplify_seq_add_stmt (pre_p, init);
    }
  if (want_value)
    {
      *expr_p = object;
      return GS_OK;
    }
  else
    {
      *expr_p = NULL;
      return GS_ALL_DONE;
    }
}
/* Given a pointer value OP0, return a simplified version of an
   indirection through OP0, or NULL_TREE if no simplification is
   possible.  This may only be applied to a rhs of an expression.
   Note that the resulting type may be different from the type pointed
   to in the sense that it is still compatible from the langhooks
   point of view.  */

static tree
gimple_fold_indirect_ref_rhs (tree t)
{
  /* Thin wrapper around the generic folder; kept as a distinct entry
     point so rhs-only call sites are easy to identify.  */
  return gimple_fold_indirect_ref (t);
}
/* Subroutine of gimplify_modify_expr to do simplifications of
MODIFY_EXPRs based on the code of the RHS. We loop for as long as
something changes. */
static enum gimplify_status
gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p,
gimple_seq *pre_p, gimple_seq *post_p,
bool want_value)
{
enum gimplify_status ret = GS_UNHANDLED;
bool changed;
do
{
changed = false;
switch (TREE_CODE (*from_p))
{
case VAR_DECL:
/* If we're assigning from a read-only variable initialized with
a constructor, do the direct assignment from the constructor,
but only if neither source nor target are volatile since this
latter assignment might end up being done on a per-field basis. */
if (DECL_INITIAL (*from_p)
&& TREE_READONLY (*from_p)
&& !TREE_THIS_VOLATILE (*from_p)
&& !TREE_THIS_VOLATILE (*to_p)
&& TREE_CODE (DECL_INITIAL (*from_p)) == CONSTRUCTOR)
{
tree old_from = *from_p;
enum gimplify_status subret;
/* Move the constructor into the RHS. */
*from_p = unshare_expr (DECL_INITIAL (*from_p));
/* Let's see if gimplify_init_constructor will need to put
it in memory. */
subret = gimplify_init_constructor (expr_p, NULL, NULL,
false, true);
if (subret == GS_ERROR)
{
/* If so, revert the change. */
*from_p = old_from;
}
else
{
ret = GS_OK;
changed = true;
}
}
break;
case INDIRECT_REF:
{
/* If we have code like
*(const A*)(A*)&x
where the type of "x" is a (possibly cv-qualified variant
of "A"), treat the entire expression as identical to "x".
This kind of code arises in C++ when an object is bound
to a const reference, and if "x" is a TARGET_EXPR we want
to take advantage of the optimization below. */
bool volatile_p = TREE_THIS_VOLATILE (*from_p);
tree t = gimple_fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0));
if (t)
{
if (TREE_THIS_VOLATILE (t) != volatile_p)
{
if (DECL_P (t))
t = build_simple_mem_ref_loc (EXPR_LOCATION (*from_p),
build_fold_addr_expr (t));
if (REFERENCE_CLASS_P (t))
TREE_THIS_VOLATILE (t) = volatile_p;
}
*from_p = t;
ret = GS_OK;
changed = true;
}
break;
}
case TARGET_EXPR:
{
/* If we are initializing something from a TARGET_EXPR, strip the
TARGET_EXPR and initialize it directly, if possible. This can't
be done if the initializer is void, since that implies that the
temporary is set in some non-trivial way.
??? What about code that pulls out the temp and uses it
elsewhere? I think that such code never uses the TARGET_EXPR as
an initializer. If I'm wrong, we'll die because the temp won't
have any RTL. In that case, I guess we'll need to replace
references somehow. */
tree init = TARGET_EXPR_INITIAL (*from_p);
if (init
&& !VOID_TYPE_P (TREE_TYPE (init)))
{
*from_p = init;
ret = GS_OK;
changed = true;
}
}
break;
case COMPOUND_EXPR:
/* Remove any COMPOUND_EXPR in the RHS so the following cases will be
caught. */
gimplify_compound_expr (from_p, pre_p, true);
ret = GS_OK;
changed = true;
break;
case CONSTRUCTOR:
/* If we already made some changes, let the front end have a
crack at this before we break it down. */
if (ret != GS_UNHANDLED)
break;
/* If we're initializing from a CONSTRUCTOR, break this into
individual MODIFY_EXPRs. */
return gimplify_init_constructor (expr_p, pre_p, post_p, want_value,
false);
case COND_EXPR:
/* If we're assigning to a non-register type, push the assignment
down into the branches. This is mandatory for ADDRESSABLE types,
since we cannot generate temporaries for such, but it saves a
copy in other cases as well. */
if (!is_gimple_reg_type (TREE_TYPE (*from_p)))
{
/* This code should mirror the code in gimplify_cond_expr. */
enum tree_code code = TREE_CODE (*expr_p);
tree cond = *from_p;
tree result = *to_p;
ret = gimplify_expr (&result, pre_p, post_p,
is_gimple_lvalue, fb_lvalue);
if (ret != GS_ERROR)
ret = GS_OK;
/* If we are going to write RESULT more than once, clear
TREE_READONLY flag, otherwise we might incorrectly promote
the variable to static const and initialize it at compile
time in one of the branches. */
if (VAR_P (result)
&& TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node
&& TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
TREE_READONLY (result) = 0;
if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node)
TREE_OPERAND (cond, 1)
= build2 (code, void_type_node, result,
TREE_OPERAND (cond, 1));
if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
TREE_OPERAND (cond, 2)
= build2 (code, void_type_node, unshare_expr (result),
TREE_OPERAND (cond, 2));
TREE_TYPE (cond) = void_type_node;
recalculate_side_effects (cond);
if (want_value)
{
gimplify_and_add (cond, pre_p);
*expr_p = unshare_expr (result);
}
else
*expr_p = cond;
return ret;
}
break;
case CALL_EXPR:
/* For calls that return in memory, give *to_p as the CALL_EXPR's
return slot so that we don't generate a temporary. */
if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p)
&& aggregate_value_p (*from_p, *from_p))
{
bool use_target;
if (!(rhs_predicate_for (*to_p))(*from_p))
/* If we need a temporary, *to_p isn't accurate. */
use_target = false;
/* It's OK to use the return slot directly unless it's an NRV. */
else if (TREE_CODE (*to_p) == RESULT_DECL
&& DECL_NAME (*to_p) == NULL_TREE
&& needs_to_live_in_memory (*to_p))
use_target = true;
else if (is_gimple_reg_type (TREE_TYPE (*to_p))
|| (DECL_P (*to_p) && DECL_REGISTER (*to_p)))
/* Don't force regs into memory. */
use_target = false;
else if (TREE_CODE (*expr_p) == INIT_EXPR)
/* It's OK to use the target directly if it's being
initialized. */
use_target = true;
else if (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (*to_p)))
!= INTEGER_CST)
/* Always use the target and thus RSO for variable-sized types.
GIMPLE cannot deal with a variable-sized assignment
embedded in a call statement. */
use_target = true;
else if (TREE_CODE (*to_p) != SSA_NAME
&& (!is_gimple_variable (*to_p)
|| needs_to_live_in_memory (*to_p)))
/* Don't use the original target if it's already addressable;
if its address escapes, and the called function uses the
NRV optimization, a conforming program could see *to_p
change before the called function returns; see c++/19317.
When optimizing, the return_slot pass marks more functions
as safe after we have escape info. */
use_target = false;
else
use_target = true;
if (use_target)
{
CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1;
mark_addressable (*to_p);
}
}
break;
case WITH_SIZE_EXPR:
/* Likewise for calls that return an aggregate of non-constant size,
since we would not be able to generate a temporary at all. */
if (TREE_CODE (TREE_OPERAND (*from_p, 0)) == CALL_EXPR)
{
*from_p = TREE_OPERAND (*from_p, 0);
/* We don't change ret in this case because the
WITH_SIZE_EXPR might have been added in
gimplify_modify_expr, so returning GS_OK would lead to an
infinite loop. */
changed = true;
}
break;
/* If we're initializing from a container, push the initialization
inside it. */
case CLEANUP_POINT_EXPR:
case BIND_EXPR:
case STATEMENT_LIST:
{
tree wrap = *from_p;
tree t;
ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_min_lval,
fb_lvalue);
if (ret != GS_ERROR)
ret = GS_OK;
t = voidify_wrapper_expr (wrap, *expr_p);
gcc_assert (t == *expr_p);
if (want_value)
{
gimplify_and_add (wrap, pre_p);
*expr_p = unshare_expr (*to_p);
}
else
*expr_p = wrap;
return GS_OK;
}
case COMPOUND_LITERAL_EXPR:
{
tree complit = TREE_OPERAND (*expr_p, 1);
tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (complit);
tree decl = DECL_EXPR_DECL (decl_s);
tree init = DECL_INITIAL (decl);
/* struct T x = (struct T) { 0, 1, 2 } can be optimized
into struct T x = { 0, 1, 2 } if the address of the
compound literal has never been taken. */
if (!TREE_ADDRESSABLE (complit)
&& !TREE_ADDRESSABLE (decl)
&& init)
{
*expr_p = copy_node (*expr_p);
TREE_OPERAND (*expr_p, 1) = init;
return GS_OK;
}
}
default:
break;
}
}
while (changed);
return ret;
}
/* Return true if T looks like a valid GIMPLE statement.  Used as the
   gimplification predicate for contexts that expect a statement, e.g.
   void-valued SAVE_EXPR operands.  */

static bool
is_gimple_stmt (tree t)
{
  const enum tree_code code = TREE_CODE (t);

  switch (code)
    {
    case NOP_EXPR:
      /* The only valid NOP_EXPR is the empty statement.  */
      return IS_EMPTY_STMT (t);

    case BIND_EXPR:
    case COND_EXPR:
      /* These are only valid if they're void.  */
      return TREE_TYPE (t) == NULL || VOID_TYPE_P (TREE_TYPE (t));

    /* Control flow, exception handling and asm statements.  */
    case SWITCH_EXPR:
    case GOTO_EXPR:
    case RETURN_EXPR:
    case LABEL_EXPR:
    case CASE_LABEL_EXPR:
    case TRY_CATCH_EXPR:
    case TRY_FINALLY_EXPR:
    case EH_FILTER_EXPR:
    case CATCH_EXPR:
    case ASM_EXPR:
    case STATEMENT_LIST:
    /* OpenACC directives.  */
    case OACC_PARALLEL:
    case OACC_KERNELS:
    case OACC_DATA:
    case OACC_HOST_DATA:
    case OACC_DECLARE:
    case OACC_UPDATE:
    case OACC_ENTER_DATA:
    case OACC_EXIT_DATA:
    case OACC_CACHE:
    /* OpenMP (and Cilk Plus) directives.  */
    case OMP_PARALLEL:
    case OMP_FOR:
    case OMP_SIMD:
    case CILK_SIMD:
    case OMP_DISTRIBUTE:
    case OACC_LOOP:
    case OMP_SECTIONS:
    case OMP_SECTION:
    case OMP_SINGLE:
    case OMP_MASTER:
    case OMP_TASKGROUP:
    case OMP_ORDERED:
    case OMP_CRITICAL:
    case OMP_TASK:
    case OMP_TARGET:
    case OMP_TARGET_DATA:
    case OMP_TARGET_UPDATE:
    case OMP_TARGET_ENTER_DATA:
    case OMP_TARGET_EXIT_DATA:
    case OMP_TASKLOOP:
    case OMP_TEAMS:
      /* These are always void.  */
      return true;

    case CALL_EXPR:
    case MODIFY_EXPR:
    case PREDICT_EXPR:
      /* These are valid regardless of their type.  */
      return true;

    default:
      return false;
    }
}
/* Promote partial stores to COMPLEX variables to total stores.  *EXPR_P is
   a MODIFY_EXPR with a lhs of a REAL/IMAGPART_EXPR of a variable with
   DECL_GIMPLE_REG_P set.

   IMPORTANT NOTE: This promotion is performed by introducing a load of the
   other, unmodified part of the complex object just before the total store.
   As a consequence, if the object is still uninitialized, an undefined value
   will be loaded into a register, which may result in a spurious exception
   if the register is floating-point and the value happens to be a signaling
   NaN for example.  Then the fully-fledged complex operations lowering pass
   followed by a DCE pass are necessary in order to fix things up.  */

static enum gimplify_status
gimplify_modify_expr_complex_part (tree *expr_p, gimple_seq *pre_p,
				   bool want_value)
{
  tree part_ref = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);
  enum tree_code code = TREE_CODE (part_ref);
  tree lhs = TREE_OPERAND (part_ref, 0);

  /* Load the part of the complex object that is NOT being written, into
     a formal temporary emitted on PRE_P.  Suppress warnings: the load may
     read an uninitialized part (see the note above).  */
  enum tree_code ocode
    = (code == REALPART_EXPR ? IMAGPART_EXPR : REALPART_EXPR);
  tree other = build1 (ocode, TREE_TYPE (rhs), lhs);
  TREE_NO_WARNING (other) = 1;
  other = get_formal_tmp_var (other, pre_p);

  /* Assemble the whole complex value from the new part and the old one.  */
  tree realpart, imagpart;
  if (code == REALPART_EXPR)
    {
      realpart = rhs;
      imagpart = other;
    }
  else
    {
      realpart = other;
      imagpart = rhs;
    }

  tree new_rhs;
  if (TREE_CONSTANT (realpart) && TREE_CONSTANT (imagpart))
    new_rhs = build_complex (TREE_TYPE (lhs), realpart, imagpart);
  else
    new_rhs = build2 (COMPLEX_EXPR, TREE_TYPE (lhs), realpart, imagpart);

  /* Emit the total store and replace the original expression with the
     stored part when its value is wanted.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (lhs, new_rhs));
  *expr_p = want_value ? rhs : NULL_TREE;

  return GS_ALL_DONE;
}
/* Gimplify the MODIFY_EXPR node pointed to by EXPR_P.

      modify_expr
	      : varname '=' rhs
	      | '*' ID '=' rhs

    PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.

    POST_P points to the list where side effects that must happen after
	*EXPR_P should be stored.

    WANT_VALUE is nonzero iff we want to use the value of this expression
	in another expression.

    Returns GS_ALL_DONE when the assignment has been fully emitted into
    PRE_P (with *EXPR_P cleared), GS_OK when *EXPR_P was rewritten and a
    value remains, or GS_ERROR on failure.  */

static enum gimplify_status
gimplify_modify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
		      bool want_value)
{
  tree *from_p = &TREE_OPERAND (*expr_p, 1);
  tree *to_p = &TREE_OPERAND (*expr_p, 0);
  enum gimplify_status ret = GS_UNHANDLED;
  gimple *assign;
  location_t loc = EXPR_LOCATION (*expr_p);
  gimple_stmt_iterator gsi;

  gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
	      || TREE_CODE (*expr_p) == INIT_EXPR);

  /* Trying to simplify a clobber using normal logic doesn't work,
     so handle it here.  Only the LHS needs gimplification; the clobber
     itself is emitted verbatim.  */
  if (TREE_CLOBBER_P (*from_p))
    {
      ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
      if (ret == GS_ERROR)
	return ret;
      gcc_assert (!want_value
		  && (VAR_P (*to_p) || TREE_CODE (*to_p) == MEM_REF));
      gimplify_seq_add_stmt (pre_p, gimple_build_assign (*to_p, *from_p));
      *expr_p = NULL;
      return GS_ALL_DONE;
    }

  /* Insert pointer conversions required by the middle-end that are not
     required by the frontend.  This fixes middle-end type checking for
     for example gcc.dg/redecl-6.c.  */
  if (POINTER_TYPE_P (TREE_TYPE (*to_p)))
    {
      STRIP_USELESS_TYPE_CONVERSION (*from_p);
      if (!useless_type_conversion_p (TREE_TYPE (*to_p), TREE_TYPE (*from_p)))
	*from_p = fold_convert_loc (loc, TREE_TYPE (*to_p), *from_p);
    }

  /* See if any simplifications can be done based on what the RHS is.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* For zero sized types only gimplify the left hand side and right hand
     side as statements and throw away the assignment.  Do this after
     gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
     types properly.  */
  if (zero_sized_type (TREE_TYPE (*from_p))
      && !want_value
      /* Don't do this for calls that return addressable types, expand_call
	 relies on those having a lhs.  */
      && !(TREE_ADDRESSABLE (TREE_TYPE (*from_p))
	   && TREE_CODE (*from_p) == CALL_EXPR))
    {
      gimplify_stmt (from_p, pre_p);
      gimplify_stmt (to_p, pre_p);
      *expr_p = NULL_TREE;
      return GS_ALL_DONE;
    }

  /* If the value being copied is of variable width, compute the length
     of the copy into a WITH_SIZE_EXPR.  Note that we need to do this
     before gimplifying any of the operands so that we can resolve any
     PLACEHOLDER_EXPRs in the size.  Also note that the RTL expander uses
     the size of the expression to be copied, not of the destination, so
     that is what we must do here.  */
  maybe_with_size_expr (from_p);

  /* As a special case, we have to temporarily allow for assignments
     with a CALL_EXPR on the RHS.  Since in GIMPLE a function call is
     a toplevel statement, when gimplifying the GENERIC expression
     MODIFY_EXPR <a, CALL_EXPR <foo>>, we cannot create the tuple
     GIMPLE_ASSIGN <a, GIMPLE_CALL <foo>>.

     Instead, we need to create the tuple GIMPLE_CALL <a, foo>.  To
     prevent gimplify_expr from trying to create a new temporary for
     foo's LHS, we tell it that it should only gimplify until it
     reaches the CALL_EXPR.  On return from gimplify_expr, the newly
     created GIMPLE_CALL <foo> will be the last statement in *PRE_P
     and all we need to do here is set 'a' to be its LHS.  */

  /* Gimplify the RHS first for C++17 and bug 71104.  */
  gimple_predicate initial_pred = initial_rhs_predicate_for (*to_p);
  ret = gimplify_expr (from_p, pre_p, post_p, initial_pred, fb_rvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Then gimplify the LHS.  */
  /* If we gimplified the RHS to a CALL_EXPR and that call may return
     twice we have to make sure to gimplify into non-SSA as otherwise
     the abnormal edge added later will make those defs not dominate
     their uses.
     ??? Technically this applies only to the registers used in the
     resulting non-register *TO_P.  */
  bool saved_into_ssa = gimplify_ctxp->into_ssa;
  if (saved_into_ssa
      && TREE_CODE (*from_p) == CALL_EXPR
      && call_expr_flags (*from_p) & ECF_RETURNS_TWICE)
    gimplify_ctxp->into_ssa = false;
  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  gimplify_ctxp->into_ssa = saved_into_ssa;
  if (ret == GS_ERROR)
    return ret;

  /* Now that the LHS is gimplified, re-gimplify the RHS if our initial
     guess for the predicate was wrong.  */
  gimple_predicate final_pred = rhs_predicate_for (*to_p);
  if (final_pred != initial_pred)
    {
      ret = gimplify_expr (from_p, pre_p, post_p, final_pred, fb_rvalue);
      if (ret == GS_ERROR)
	return ret;
    }

  /* In case of va_arg internal fn wrappped in a WITH_SIZE_EXPR, add the type
     size as argument to the call.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree call = TREE_OPERAND (*from_p, 0);
      tree vlasize = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (call) == CALL_EXPR
	  && CALL_EXPR_IFN (call) == IFN_VA_ARG)
	{
	  /* Rebuild the IFN_VA_ARG call with the VLA size appended as an
	     extra trailing argument.  */
	  int nargs = call_expr_nargs (call);
	  tree type = TREE_TYPE (call);
	  tree ap = CALL_EXPR_ARG (call, 0);
	  tree tag = CALL_EXPR_ARG (call, 1);
	  tree aptag = CALL_EXPR_ARG (call, 2);
	  tree newcall = build_call_expr_internal_loc (EXPR_LOCATION (call),
						       IFN_VA_ARG, type,
						       nargs + 1, ap, tag,
						       aptag, vlasize);
	  TREE_OPERAND (*from_p, 0) = newcall;
	}
    }

  /* Now see if the above changed *from_p to something we handle specially.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* If we've got a variable sized assignment between two lvalues (i.e. does
     not involve a call), then we can make things a bit more straightforward
     by converting the assignment to memcpy or memset.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree from = TREE_OPERAND (*from_p, 0);
      tree size = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (from) == CONSTRUCTOR)
	return gimplify_modify_expr_to_memset (expr_p, size, want_value, pre_p);

      if (is_gimple_addressable (from))
	{
	  *from_p = from;
	  return gimplify_modify_expr_to_memcpy (expr_p, size, want_value,
						 pre_p);
	}
    }

  /* Transform partial stores to non-addressable complex variables into
     total stores.  This allows us to use real instead of virtual operands
     for these variables, which improves optimization.  */
  if ((TREE_CODE (*to_p) == REALPART_EXPR
       || TREE_CODE (*to_p) == IMAGPART_EXPR)
      && is_gimple_reg (TREE_OPERAND (*to_p, 0)))
    return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value);

  /* Try to alleviate the effects of the gimplification creating artificial
     temporaries (see for example is_gimple_reg_rhs) on the debug info, but
     make sure not to create DECL_DEBUG_EXPR links across functions.  */
  if (!gimplify_ctxp->into_ssa
      && VAR_P (*from_p)
      && DECL_IGNORED_P (*from_p)
      && DECL_P (*to_p)
      && !DECL_IGNORED_P (*to_p)
      && decl_function_context (*to_p) == current_function_decl)
    {
      /* Give the artificial temporary a name derived from the destination
	 so that debug info can refer to it.  */
      if (!DECL_NAME (*from_p) && DECL_NAME (*to_p))
	DECL_NAME (*from_p)
	  = create_tmp_var_name (IDENTIFIER_POINTER (DECL_NAME (*to_p)));
      DECL_HAS_DEBUG_EXPR_P (*from_p) = 1;
      SET_DECL_DEBUG_EXPR (*from_p, *to_p);
   }

  /* Reading a volatile LHS back would be an extra access; evaluate the RHS
     into a temporary first so the value can be reused below.  */
  if (want_value && TREE_THIS_VOLATILE (*to_p))
    *from_p = get_initialized_tmp_var (*from_p, pre_p, post_p);

  if (TREE_CODE (*from_p) == CALL_EXPR)
    {
      /* Since the RHS is a CALL_EXPR, we need to create a GIMPLE_CALL
	 instead of a GIMPLE_ASSIGN.  */
      gcall *call_stmt;
      if (CALL_EXPR_FN (*from_p) == NULL_TREE)
	{
	  /* Gimplify internal functions created in the FEs.  */
	  int nargs = call_expr_nargs (*from_p), i;
	  enum internal_fn ifn = CALL_EXPR_IFN (*from_p);
	  auto_vec<tree> vargs (nargs);

	  for (i = 0; i < nargs; i++)
	    {
	      gimplify_arg (&CALL_EXPR_ARG (*from_p, i), pre_p,
			    EXPR_LOCATION (*from_p));
	      vargs.quick_push (CALL_EXPR_ARG (*from_p, i));
	    }
	  call_stmt = gimple_build_call_internal_vec (ifn, vargs);
	  gimple_set_location (call_stmt, EXPR_LOCATION (*expr_p));
	}
      else
	{
	  /* Strip the function pointer's conversion but remember its type
	     so the GIMPLE call carries the original fntype.  */
	  tree fnptrtype = TREE_TYPE (CALL_EXPR_FN (*from_p));
	  CALL_EXPR_FN (*from_p) = TREE_OPERAND (CALL_EXPR_FN (*from_p), 0);
	  STRIP_USELESS_TYPE_CONVERSION (CALL_EXPR_FN (*from_p));
	  tree fndecl = get_callee_fndecl (*from_p);
	  /* Lower __builtin_expect to the internal function so later passes
	     can consume and then drop it.  */
	  if (fndecl
	      && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT
	      && call_expr_nargs (*from_p) == 3)
	    call_stmt = gimple_build_call_internal (IFN_BUILTIN_EXPECT, 3,
						    CALL_EXPR_ARG (*from_p, 0),
						    CALL_EXPR_ARG (*from_p, 1),
						    CALL_EXPR_ARG (*from_p, 2));
	  else
	    {
	      call_stmt = gimple_build_call_from_tree (*from_p);
	      gimple_call_set_fntype (call_stmt, TREE_TYPE (fnptrtype));
	    }
	}
      notice_special_calls (call_stmt);
      if (!gimple_call_noreturn_p (call_stmt) || !should_remove_lhs_p (*to_p))
	gimple_call_set_lhs (call_stmt, *to_p);
      else if (TREE_CODE (*to_p) == SSA_NAME)
	/* The above is somewhat premature, avoid ICEing later for a
	   SSA name w/o a definition.  We may have uses in the GIMPLE IL.
	   ??? This doesn't make it a default-def.  */
	SSA_NAME_DEF_STMT (*to_p) = gimple_build_nop ();
      assign = call_stmt;
    }
  else
    {
      assign = gimple_build_assign (*to_p, *from_p);
      gimple_set_location (assign, EXPR_LOCATION (*expr_p));
      if (COMPARISON_CLASS_P (*from_p))
	gimple_set_no_warning (assign, TREE_NO_WARNING (*from_p));
    }

  if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p))
    {
      /* We should have got an SSA name from the start.  */
      gcc_assert (TREE_CODE (*to_p) == SSA_NAME
		  || ! gimple_in_ssa_p (cfun));
    }

  gimplify_seq_add_stmt (pre_p, assign);
  gsi = gsi_last (*pre_p);
  maybe_fold_stmt (&gsi);

  if (want_value)
    {
      /* For a volatile LHS reuse the RHS temporary computed above instead
	 of re-reading the volatile location.  */
      *expr_p = TREE_THIS_VOLATILE (*to_p) ? *from_p : unshare_expr (*to_p);
      return GS_OK;
    }
  else
    *expr_p = NULL;

  return GS_ALL_DONE;
}
/* Gimplify a comparison between two variable-sized objects.  Do this
   with a call to BUILT_IN_MEMCMP.  */

static enum gimplify_status
gimplify_variable_sized_compare (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  tree lhs = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);

  /* Number of bytes to compare; any PLACEHOLDER_EXPRs in the size are
     resolved against the first operand.  */
  tree nbytes = unshare_expr (TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
  nbytes = SUBSTITUTE_PLACEHOLDER_IN_EXPR (nbytes, lhs);

  /* Build memcmp (&lhs, &rhs, nbytes).  */
  tree dest = build_fold_addr_expr_loc (loc, lhs);
  tree src = build_fold_addr_expr_loc (loc, rhs);
  tree memcmp_fn = builtin_decl_implicit (BUILT_IN_MEMCMP);
  tree call = build_call_expr_loc (loc, memcmp_fn, 3, dest, src, nbytes);

  /* Replace the original comparison with 'memcmp (...) <CODE> 0'.  */
  tree cmp
    = build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), call,
	      integer_zero_node);
  SET_EXPR_LOCATION (cmp, loc);
  *expr_p = cmp;

  return GS_OK;
}
/* Gimplify a comparison between two aggregate objects of integral scalar
   mode as a comparison between the bitwise equivalent scalar values.  */

static enum gimplify_status
gimplify_scalar_mode_aggregate_compare (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  tree lhs = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);

  /* An unsigned integer type with the same machine mode as the aggregate.  */
  tree scalar_type
    = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (lhs)), 1);

  /* Reinterpret both operands as that scalar type and compare the bits.  */
  lhs = fold_build1_loc (loc, VIEW_CONVERT_EXPR, scalar_type, lhs);
  rhs = fold_build1_loc (loc, VIEW_CONVERT_EXPR, scalar_type, rhs);

  *expr_p
    = fold_build2_loc (loc, TREE_CODE (*expr_p), TREE_TYPE (*expr_p),
		       lhs, rhs);

  return GS_OK;
}
/* Gimplify an expression sequence.  This function gimplifies each
   expression and rewrites the original expression with the last
   expression of the sequence in GIMPLE form.

   PRE_P points to the list where the side effects for all the
       expressions in the sequence will be emitted.

   WANT_VALUE is true when the result of the last COMPOUND_EXPR is used.  */

static enum gimplify_status
gimplify_compound_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
  tree t = *expr_p;

  /* Emit every left-hand operand of the chain as a statement, descending
     into nested COMPOUND_EXPRs on the left, until only the final value
     expression remains.  The body executes at least once.  */
  for (;;)
    {
      tree *stmt_p = &TREE_OPERAND (t, 0);

      if (TREE_CODE (*stmt_p) == COMPOUND_EXPR)
	gimplify_compound_expr (stmt_p, pre_p, false);
      else
	gimplify_stmt (stmt_p, pre_p);

      t = TREE_OPERAND (t, 1);
      if (TREE_CODE (t) != COMPOUND_EXPR)
	break;
    }

  *expr_p = t;

  /* When the value is unused, the trailing expression is a statement too.  */
  if (!want_value)
    {
      gimplify_stmt (expr_p, pre_p);
      return GS_ALL_DONE;
    }

  return GS_OK;
}
/* Gimplify a SAVE_EXPR node.  EXPR_P points to the expression to
   gimplify.  After gimplification, EXPR_P will point to a new temporary
   that holds the original value of the SAVE_EXPR node.

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_save_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR);

  /* An already-resolved SAVE_EXPR simply stands for its saved value.  */
  if (SAVE_EXPR_RESOLVED_P (*expr_p))
    {
      *expr_p = TREE_OPERAND (*expr_p, 0);
      return GS_ALL_DONE;
    }

  /* First evaluation: materialize the operand exactly once.  */
  enum gimplify_status ret = GS_ALL_DONE;
  tree val = TREE_OPERAND (*expr_p, 0);

  if (TREE_TYPE (val) == void_type_node)
    {
      /* The operand may be a void-valued expression such as SAVE_EXPRs
	 generated by the Java frontend for class initialization.  It is
	 being executed only for its side-effects.  */
      ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			   is_gimple_stmt, fb_none);
      val = NULL;
    }
  else
    /* The temporary may not be an SSA name as later abnormal and EH
       control flow may invalidate use/def domination.  */
    val = get_initialized_tmp_var (val, pre_p, post_p, false);

  TREE_OPERAND (*expr_p, 0) = val;
  SAVE_EXPR_RESOLVED_P (*expr_p) = 1;

  *expr_p = val;
  return ret;
}
/* Rewrite the ADDR_EXPR node pointed to by EXPR_P

	unary_expr
		: ...
		| '&' varname
		...

    PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.

    POST_P points to the list where side effects that must happen after
	*EXPR_P should be stored.

    Returns GS_OK when *EXPR_P was rewritten, or GS_ERROR if the operand
    could not be made addressable.  */

static enum gimplify_status
gimplify_addr_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree expr = *expr_p;
  tree op0 = TREE_OPERAND (expr, 0);
  enum gimplify_status ret;
  location_t loc = EXPR_LOCATION (*expr_p);

  switch (TREE_CODE (op0))
    {
    case INDIRECT_REF:
    do_indirect_ref:
      /* Check if we are dealing with an expression of the form '&*ptr'.
	 While the front end folds away '&*ptr' into 'ptr', these
	 expressions may be generated internally by the compiler (e.g.,
	 builtins like __builtin_va_end).  */
      /* Caution: the silent array decomposition semantics we allow for
	 ADDR_EXPR means we can't always discard the pair.  */
      /* Gimplification of the ADDR_EXPR operand may drop
	 cv-qualification conversions, so make sure we add them if
	 needed.  */
      {
	tree op00 = TREE_OPERAND (op0, 0);
	tree t_expr = TREE_TYPE (expr);
	tree t_op00 = TREE_TYPE (op00);

        if (!useless_type_conversion_p (t_expr, t_op00))
	  op00 = fold_convert_loc (loc, TREE_TYPE (expr), op00);
        *expr_p = op00;
        ret = GS_OK;
      }
      break;

    case VIEW_CONVERT_EXPR:
      /* Take the address of our operand and then convert it to the type of
	 this ADDR_EXPR.

	 ??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at
	 all clear.  The impact of this transformation is even less clear.  */

      /* If the operand is a useless conversion, look through it.  Doing so
	 guarantees that the ADDR_EXPR and its operand will remain of the
	 same type.  */
      if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0)))
	op0 = TREE_OPERAND (op0, 0);

      *expr_p = fold_convert_loc (loc, TREE_TYPE (expr),
				  build_fold_addr_expr_loc (loc,
							TREE_OPERAND (op0, 0)));
      ret = GS_OK;
      break;

    case MEM_REF:
      /* '&MEM[ptr, 0]' is equivalent to '&*ptr' and can be handled by the
	 INDIRECT_REF case above; a nonzero offset cannot be discarded.  */
      if (integer_zerop (TREE_OPERAND (op0, 1)))
	goto do_indirect_ref;

      /* fall through */

    default:
      /* If we see a call to a declared builtin or see its address
	 being taken (we can unify those cases here) then we can mark
	 the builtin for implicit generation by GCC.  */
      if (TREE_CODE (op0) == FUNCTION_DECL
	  && DECL_BUILT_IN_CLASS (op0) == BUILT_IN_NORMAL
	  && builtin_decl_declared_p (DECL_FUNCTION_CODE (op0)))
	set_builtin_decl_implicit_p (DECL_FUNCTION_CODE (op0), true);

      /* We use fb_either here because the C frontend sometimes takes
	 the address of a call that returns a struct; see
	 gcc.dg/c99-array-lval-1.c.  The gimplifier will correctly make
	 the implied temporary explicit.  */

      /* Make the operand addressable.  */
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p,
			   is_gimple_addressable, fb_either);
      if (ret == GS_ERROR)
	break;

      /* Then mark it.  Beware that it may not be possible to do so directly
	 if a temporary has been created by the gimplification.  */
      prepare_gimple_addressable (&TREE_OPERAND (expr, 0), pre_p);

      op0 = TREE_OPERAND (expr, 0);

      /* For various reasons, the gimplification of the expression
	 may have made a new INDIRECT_REF.  */
      if (TREE_CODE (op0) == INDIRECT_REF)
	goto do_indirect_ref;

      mark_addressable (TREE_OPERAND (expr, 0));

      /* The FEs may end up building ADDR_EXPRs early on a decl with
	 an incomplete type.  Re-build ADDR_EXPRs in canonical form
	 here.  */
      if (!types_compatible_p (TREE_TYPE (op0), TREE_TYPE (TREE_TYPE (expr))))
	*expr_p = build_fold_addr_expr (op0);

      /* Make sure TREE_CONSTANT and TREE_SIDE_EFFECTS are set properly.  */
      recompute_tree_invariant_for_addr_expr (*expr_p);

      /* If we re-built the ADDR_EXPR add a conversion to the original type
         if required.  */
      if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p)))
	*expr_p = fold_convert (TREE_TYPE (expr), *expr_p);

      break;
    }

  return ret;
}
/* Gimplify the operands of an ASM_EXPR.  Input operands should be a gimple
   value; output operands should be a gimple lvalue.  On success a
   GIMPLE_ASM is appended to PRE_P; ASMs with invalid operands are not
   emitted and GS_ERROR is returned.  */

static enum gimplify_status
gimplify_asm_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree expr;
  int noutputs;
  const char **oconstraints;
  int i;
  tree link;
  const char *constraint;
  bool allows_mem, allows_reg, is_inout;
  enum gimplify_status ret, tret;
  gasm *stmt;
  vec<tree, va_gc> *inputs;
  vec<tree, va_gc> *outputs;
  vec<tree, va_gc> *clobbers;
  vec<tree, va_gc> *labels;
  tree link_next;

  expr = *expr_p;
  noutputs = list_length (ASM_OUTPUTS (expr));
  /* Stack-allocated array of the output constraint strings, consulted
     later when parsing matching input constraints.  */
  oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *));

  inputs = NULL;
  outputs = NULL;
  clobbers = NULL;
  labels = NULL;

  ret = GS_ALL_DONE;
  link_next = NULL_TREE;
  /* Gimplify the output operands.  */
  for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = link_next)
    {
      bool ok;
      size_t constraint_len;

      link_next = TREE_CHAIN (link);

      oconstraints[i]
	= constraint
	= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      constraint_len = strlen (constraint);
      if (constraint_len == 0)
	continue;

      ok = parse_output_constraint (&constraint, i, 0, 0,
				    &allows_mem, &allows_reg, &is_inout);
      if (!ok)
	{
	  ret = GS_ERROR;
	  is_inout = false;
	}

      /* A memory-only output operand must live in memory.  */
      if (!allows_reg && allows_mem)
	mark_addressable (TREE_VALUE (link));

      tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
			    is_inout ? is_gimple_min_lval : is_gimple_lvalue,
			    fb_lvalue | fb_mayfail);
      if (tret == GS_ERROR)
	{
	  error ("invalid lvalue in asm output %d", i);
	  ret = tret;
	}

      /* If the constraint does not allow memory make sure we gimplify
         it to a register if it is not already but its base is.  This
	 happens for complex and vector components.  */
      if (!allows_mem)
	{
	  tree op = TREE_VALUE (link);
	  if (! is_gimple_val (op)
	      && is_gimple_reg_type (TREE_TYPE (op))
	      && is_gimple_reg (get_base_address (op)))
	    {
	      tree tem = create_tmp_reg (TREE_TYPE (op));
	      tree ass;
	      /* For an in/out operand the temporary must also carry the
		 current value in on PRE_P; the result is copied back out
		 on POST_P in either case.  */
	      if (is_inout)
		{
		  ass = build2 (MODIFY_EXPR, TREE_TYPE (tem),
				tem, unshare_expr (op));
		  gimplify_and_add (ass, pre_p);
		}
	      ass = build2 (MODIFY_EXPR, TREE_TYPE (tem), op, tem);
	      gimplify_and_add (ass, post_p);

	      TREE_VALUE (link) = tem;
	      tret = GS_OK;
	    }
	}

      vec_safe_push (outputs, link);
      TREE_CHAIN (link) = NULL_TREE;

      if (is_inout)
	{
	  /* An input/output operand.  To give the optimizers more
	     flexibility, split it into separate input and output
 	     operands.  */
	  tree input;
	  /* Buffer big enough to format a 32-bit UINT_MAX into.  */
	  char buf[11];

	  /* Turn the in/out constraint into an output constraint.  */
	  char *p = xstrdup (constraint);
	  p[0] = '=';
	  TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p);

	  /* And add a matching input constraint.  */
	  if (allows_reg)
	    {
	      sprintf (buf, "%u", i);

	      /* If there are multiple alternatives in the constraint,
		 handle each of them individually.  Those that allow register
		 will be replaced with operand number, the others will stay
		 unchanged.  */
	      if (strchr (p, ',') != NULL)
		{
		  /* First pass: compute an upper bound on the rewritten
		     constraint's length.  */
		  size_t len = 0, buflen = strlen (buf);
		  char *beg, *end, *str, *dst;

		  for (beg = p + 1;;)
		    {
		      end = strchr (beg, ',');
		      if (end == NULL)
			end = strchr (beg, '\0');
		      if ((size_t) (end - beg) < buflen)
			len += buflen + 1;
		      else
			len += end - beg + 1;
		      if (*end)
			beg = end + 1;
		      else
			break;
		    }

		  /* Second pass: rewrite each alternative, substituting the
		     operand number for register-allowing alternatives.  */
		  str = (char *) alloca (len);
		  for (beg = p + 1, dst = str;;)
		    {
		      const char *tem;
		      bool mem_p, reg_p, inout_p;

		      end = strchr (beg, ',');
		      if (end)
			*end = '\0';
		      /* Temporarily prepend '=' so the alternative parses
			 as an output constraint.  */
		      beg[-1] = '=';
		      tem = beg - 1;
		      parse_output_constraint (&tem, i, 0, 0,
					       &mem_p, &reg_p, &inout_p);
		      if (dst != str)
			*dst++ = ',';
		      if (reg_p)
			{
			  memcpy (dst, buf, buflen);
			  dst += buflen;
			}
		      else
			{
			  if (end)
			    len = end - beg;
			  else
			    len = strlen (beg);
			  memcpy (dst, beg, len);
			  dst += len;
			}
		      if (end)
			beg = end + 1;
		      else
			break;
		    }
		  *dst = '\0';
		  input = build_string (dst - str, str);
		}
	      else
		input = build_string (strlen (buf), buf);
	    }
	  else
	    /* Register not allowed: keep the original constraint minus
	       the leading '+'.  */
	    input = build_string (constraint_len - 1, constraint + 1);

	  free (p);

	  input = build_tree_list (build_tree_list (NULL_TREE, input),
				   unshare_expr (TREE_VALUE (link)));
	  ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input);
	}
    }

  link_next = NULL_TREE;
  /* Gimplify the input operands.  Note I keeps counting past the outputs
     so diagnostics refer to the overall operand number.  */
  for (link = ASM_INPUTS (expr); link; ++i, link = link_next)
    {
      link_next = TREE_CHAIN (link);
      constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      parse_input_constraint (&constraint, 0, 0, noutputs, 0,
			      oconstraints, &allows_mem, &allows_reg);

      /* If we can't make copies, we can only accept memory.  */
      if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (link))))
	{
	  if (allows_mem)
	    allows_reg = 0;
	  else
	    {
	      error ("impossible constraint in %<asm%>");
	      error ("non-memory input %d must stay in memory", i);
	      return GS_ERROR;
	    }
	}

      /* If the operand is a memory input, it should be an lvalue.  */
      if (!allows_reg && allows_mem)
	{
	  tree inputv = TREE_VALUE (link);
	  STRIP_NOPS (inputv);
	  /* Side-effecting rvalues can never be memory operands.  */
	  if (TREE_CODE (inputv) == PREDECREMENT_EXPR
	      || TREE_CODE (inputv) == PREINCREMENT_EXPR
	      || TREE_CODE (inputv) == POSTDECREMENT_EXPR
	      || TREE_CODE (inputv) == POSTINCREMENT_EXPR
	      || TREE_CODE (inputv) == MODIFY_EXPR)
	    TREE_VALUE (link) = error_mark_node;
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_lvalue, fb_lvalue | fb_mayfail);
	  if (tret != GS_ERROR)
	    {
	      /* Unlike output operands, memory inputs are not guaranteed
		 to be lvalues by the FE, and while the expressions are
		 marked addressable there, if it is e.g. a statement
		 expression, temporaries in it might not end up being
		 addressable.  They might be already used in the IL and thus
		 it is too late to make them addressable now though.  */
	      tree x = TREE_VALUE (link);
	      while (handled_component_p (x))
		x = TREE_OPERAND (x, 0);
	      if (TREE_CODE (x) == MEM_REF
		  && TREE_CODE (TREE_OPERAND (x, 0)) == ADDR_EXPR)
		x = TREE_OPERAND (TREE_OPERAND (x, 0), 0);
	      if ((VAR_P (x)
		   || TREE_CODE (x) == PARM_DECL
		   || TREE_CODE (x) == RESULT_DECL)
		  && !TREE_ADDRESSABLE (x)
		  && is_gimple_reg (x))
		{
		  warning_at (EXPR_LOC_OR_LOC (TREE_VALUE (link),
					       input_location), 0,
			      "memory input %d is not directly addressable",
			      i);
		  prepare_gimple_addressable (&TREE_VALUE (link), pre_p);
		}
	    }
	  mark_addressable (TREE_VALUE (link));
	  if (tret == GS_ERROR)
	    {
	      error_at (EXPR_LOC_OR_LOC (TREE_VALUE (link), input_location),
			"memory input %d is not directly addressable", i);
	      ret = tret;
	    }
	}
      else
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_asm_val, fb_rvalue);
	  if (tret == GS_ERROR)
	    ret = tret;
	}

      TREE_CHAIN (link) = NULL_TREE;
      vec_safe_push (inputs, link);
    }

  link_next = NULL_TREE;
  /* Collect the clobbers, detaching each from the chain.  */
  for (link = ASM_CLOBBERS (expr); link; ++i, link = link_next)
    {
      link_next = TREE_CHAIN (link);
      TREE_CHAIN (link) = NULL_TREE;
      vec_safe_push (clobbers, link);
    }

  link_next = NULL_TREE;
  /* Collect the goto labels, detaching each from the chain.  */
  for (link = ASM_LABELS (expr); link; ++i, link = link_next)
    {
      link_next = TREE_CHAIN (link);
      TREE_CHAIN (link) = NULL_TREE;
      vec_safe_push (labels, link);
    }

  /* Do not add ASMs with errors to the gimple IL stream.  */
  if (ret != GS_ERROR)
    {
      stmt = gimple_build_asm_vec (TREE_STRING_POINTER (ASM_STRING (expr)),
				   inputs, outputs, clobbers, labels);

      /* An asm without outputs is implicitly volatile.  */
      gimple_asm_set_volatile (stmt, ASM_VOLATILE_P (expr) || noutputs == 0);
      gimple_asm_set_input (stmt, ASM_INPUT_P (expr));

      gimplify_seq_add_stmt (pre_p, stmt);
    }

  return ret;
}
/* Gimplify a CLEANUP_POINT_EXPR.  Currently this works by adding
   GIMPLE_WITH_CLEANUP_EXPRs to the prequeue as we encounter cleanups while
   gimplifying the body, and converting them to TRY_FINALLY_EXPRs when we
   return to this function.

   FIXME should we complexify the prequeue handling instead?  Or use flags
   for all the cleanups and let the optimizer tighten them up?  The current
   code seems pretty fragile; it will break on a cleanup within any
   non-conditional nesting.  But any such nesting would be broken, anyway;
   we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct
   and continues out of it.  We can do that at the RTL level, though, so
   having an optimizer to tighten up try/finally regions would be a Good
   Thing.  */

static enum gimplify_status
gimplify_cleanup_point_expr (tree *expr_p, gimple_seq *pre_p)
{
  gimple_stmt_iterator iter;
  gimple_seq body_sequence = NULL;

  /* If the wrapped expression yields a value, VOIDIFY_WRAPPER_EXPR
     returns a temporary to use in its place below.  */
  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  /* We only care about the number of conditions between the innermost
     CLEANUP_POINT_EXPR and the cleanup.  So save and reset the count and
     any cleanups collected outside the CLEANUP_POINT_EXPR.  */
  int old_conds = gimplify_ctxp->conditions;
  gimple_seq old_cleanups = gimplify_ctxp->conditional_cleanups;
  bool old_in_cleanup_point_expr = gimplify_ctxp->in_cleanup_point_expr;
  gimplify_ctxp->conditions = 0;
  gimplify_ctxp->conditional_cleanups = NULL;
  gimplify_ctxp->in_cleanup_point_expr = true;

  /* Gimplify the body into its own sequence; any GIMPLE_WITH_CLEANUP_EXPR
     markers it emits are rewritten below.  */
  gimplify_stmt (&TREE_OPERAND (*expr_p, 0), &body_sequence);

  gimplify_ctxp->conditions = old_conds;
  gimplify_ctxp->conditional_cleanups = old_cleanups;
  gimplify_ctxp->in_cleanup_point_expr = old_in_cleanup_point_expr;

  /* Convert each WCE marker: the statements FOLLOWING the marker become
     the protected body of a GIMPLE_TRY whose cleanup is the WCE's.  */
  for (iter = gsi_start (body_sequence); !gsi_end_p (iter); )
    {
      gimple *wce = gsi_stmt (iter);

      if (gimple_code (wce) == GIMPLE_WITH_CLEANUP_EXPR)
	{
	  if (gsi_one_before_end_p (iter))
	    {
	      /* Nothing follows the marker, so there is nothing to protect;
		 inline a non-EH-only cleanup directly and drop the marker.
		 Note that gsi_insert_seq_before and gsi_remove do not
		 scan operands, unlike some other sequence mutators.  */
	      if (!gimple_wce_cleanup_eh_only (wce))
		gsi_insert_seq_before_without_update (&iter,
						      gimple_wce_cleanup (wce),
						      GSI_SAME_STMT);
	      gsi_remove (&iter, true);
	      break;
	    }
	  else
	    {
	      gtry *gtry;
	      gimple_seq seq;
	      enum gimple_try_flags kind;

	      if (gimple_wce_cleanup_eh_only (wce))
		kind = GIMPLE_TRY_CATCH;
	      else
		kind = GIMPLE_TRY_FINALLY;
	      seq = gsi_split_seq_after (iter);

	      gtry = gimple_build_try (seq, gimple_wce_cleanup (wce), kind);
              /* Do not use gsi_replace here, as it may scan operands.
                 We want to do a simple structural modification only.  */
	      gsi_set_stmt (&iter, gtry);
	      iter = gsi_start (gtry->eval);
	    }
	}
      else
	gsi_next (&iter);
    }

  gimplify_seq_add_seq (pre_p, body_sequence);
  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }
  else
    {
      *expr_p = NULL;
      return GS_ALL_DONE;
    }
}
/* Insert a cleanup marker for gimplify_cleanup_point_expr.  CLEANUP
   is the cleanup action required.  EH_ONLY is true if the cleanup should
   only be executed if an exception is thrown, not on normal exit.
   If FORCE_UNCOND is true perform the cleanup unconditionally;  this is
   only valid for clobbers.  VAR is the variable the cleanup is for; it is
   only used to suppress uninitialized-use warnings in the conditional
   case.  */

static void
gimple_push_cleanup (tree var, tree cleanup, bool eh_only, gimple_seq *pre_p,
		     bool force_uncond = false)
{
  gimple *wce;
  gimple_seq cleanup_stmts = NULL;

  /* Errors can result in improperly nested cleanups.  Which results in
     confusion when trying to resolve the GIMPLE_WITH_CLEANUP_EXPR.  */
  if (seen_error ())
    return;

  if (gimple_conditional_context ())
    {
      /* If we're in a conditional context, this is more complex.  We only
	 want to run the cleanup if we actually ran the initialization that
	 necessitates it, but we want to run it after the end of the
	 conditional context.  So we wrap the try/finally around the
	 condition and use a flag to determine whether or not to actually
	 run the destructor.  Thus

	   test ? f(A()) : 0

	 becomes (approximately)

	   flag = 0;
	   try {
	     if (test) { A::A(temp); flag = 1; val = f(temp); }
	     else { val = 0; }
	   } finally {
	     if (flag) A::~A(temp);
	   }
	   val
      */
      if (force_uncond)
	{
	  /* Clobbers may run unconditionally, so no guard flag is
	     needed; queue the WCE among the conditional cleanups.  */
	  gimplify_stmt (&cleanup, &cleanup_stmts);
	  wce = gimple_build_wce (cleanup_stmts);
	  gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, wce);
	}
      else
	{
	  tree flag = create_tmp_var (boolean_type_node, "cleanup");
	  gassign *ffalse = gimple_build_assign (flag, boolean_false_node);
	  gassign *ftrue = gimple_build_assign (flag, boolean_true_node);

	  /* Guard the cleanup behind FLAG, which is set (via FTRUE in
	     *PRE_P) only on the path that ran the initialization.  */
	  cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL);
	  gimplify_stmt (&cleanup, &cleanup_stmts);
	  wce = gimple_build_wce (cleanup_stmts);

	  gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, ffalse);
	  gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, wce);
	  gimplify_seq_add_stmt (pre_p, ftrue);

	  /* Because of this manipulation, and the EH edges that jump
	     threading cannot redirect, the temporary (VAR) will appear
	     to be used uninitialized.  Don't warn.  */
	  TREE_NO_WARNING (var) = 1;
	}
    }
  else
    {
      /* Unconditional context: a plain WCE marker is enough;
	 gimplify_cleanup_point_expr will turn it into a try/finally
	 (or try/catch when EH_ONLY).  */
      gimplify_stmt (&cleanup, &cleanup_stmts);
      wce = gimple_build_wce (cleanup_stmts);
      gimple_wce_set_cleanup_eh_only (wce, eh_only);
      gimplify_seq_add_stmt (pre_p, wce);
    }
}
/* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR.  */

static enum gimplify_status
gimplify_target_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree targ = *expr_p;
  tree temp = TARGET_EXPR_SLOT (targ);
  tree init = TARGET_EXPR_INITIAL (targ);
  enum gimplify_status ret;

  /* Where in *PRE_P an ASAN unpoison call would have to go; recorded
     before the initializer is gimplified so it precedes any use.  */
  bool unpoison_empty_seq = false;
  gimple_stmt_iterator unpoison_it;

  if (init)
    {
      tree cleanup = NULL_TREE;

      /* TARGET_EXPR temps aren't part of the enclosing block, so add it
	 to the temps list.  Handle also variable length TARGET_EXPRs.  */
      if (TREE_CODE (DECL_SIZE (temp)) != INTEGER_CST)
	{
	  if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (temp)))
	    gimplify_type_sizes (TREE_TYPE (temp), pre_p);
	  gimplify_vla_decl (temp, pre_p);
	}
      else
	{
	  /* Save location where we need to place unpoisoning.  It's possible
	     that a variable will be converted to needs_to_live_in_memory.  */
	  unpoison_it = gsi_last (*pre_p);
	  unpoison_empty_seq = gsi_end_p (unpoison_it);

	  gimple_add_tmp_var (temp);
	}

      /* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the
	 expression is supposed to initialize the slot.  */
      if (VOID_TYPE_P (TREE_TYPE (init)))
	ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
      else
	{
	  /* Otherwise wrap the initializer in an explicit INIT_EXPR of
	     the slot and gimplify that as a statement.  */
	  tree init_expr = build2 (INIT_EXPR, void_type_node, temp, init);
	  init = init_expr;
	  ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
	  init = NULL;
	  /* The INIT_EXPR wrapper is fully consumed by gimplification;
	     release it back to the GC.  */
	  ggc_free (init_expr);
	}
      if (ret == GS_ERROR)
	{
	  /* PR c++/28266 Make sure this is expanded only once. */
	  TARGET_EXPR_INITIAL (targ) = NULL_TREE;
	  return GS_ERROR;
	}
      if (init)
	gimplify_and_add (init, pre_p);

      /* If needed, push the cleanup for the temp.  */
      if (TARGET_EXPR_CLEANUP (targ))
	{
	  if (CLEANUP_EH_ONLY (targ))
	    gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ),
				 CLEANUP_EH_ONLY (targ), pre_p);
	  else
	    /* Non-EH cleanups are pushed last, below, after the clobber
	       and ASAN cleanups.  */
	    cleanup = TARGET_EXPR_CLEANUP (targ);
	}

      /* Add a clobber for the temporary going out of scope, like
	 gimplify_bind_expr.  */
      if (gimplify_ctxp->in_cleanup_point_expr
	  && needs_to_live_in_memory (temp))
	{
	  if (flag_stack_reuse == SR_ALL)
	    {
	      /* An empty volatile CONSTRUCTOR assignment marks the slot
		 dead so its stack space can be reused.  */
	      tree clobber = build_constructor (TREE_TYPE (temp),
						NULL);
	      TREE_THIS_VOLATILE (clobber) = true;
	      clobber = build2 (MODIFY_EXPR, TREE_TYPE (temp), temp, clobber);
	      gimple_push_cleanup (temp, clobber, false, pre_p, true);
	    }
	  if (asan_poisoned_variables && dbg_cnt (asan_use_after_scope))
	    {
	      tree asan_cleanup = build_asan_poison_call_expr (temp);
	      if (asan_cleanup)
		{
		  if (unpoison_empty_seq)
		    unpoison_it = gsi_start (*pre_p);

		  /* Unpoison before first use, re-poison on scope exit.  */
		  asan_poison_variable (temp, false, &unpoison_it,
					unpoison_empty_seq);
		  gimple_push_cleanup (temp, asan_cleanup, false, pre_p);
		}
	    }
	}
      if (cleanup)
	gimple_push_cleanup (temp, cleanup, false, pre_p);

      /* Only expand this once.  */
      TREE_OPERAND (targ, 3) = init;
      TARGET_EXPR_INITIAL (targ) = NULL_TREE;
    }
  else
    /* We should have expanded this before.  */
    gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp));

  /* The TARGET_EXPR evaluates to its slot.  */
  *expr_p = temp;
  return GS_OK;
}
/* Gimplification of expression trees. */
/* Gimplify an expression which appears at statement context.  The
   corresponding GIMPLE statements are added to *SEQ_P.  If *SEQ_P is
   NULL, a new sequence is allocated.

   Return true if we actually added a statement to the queue.  */

bool
gimplify_stmt (tree *stmt_p, gimple_seq *seq_p)
{
  /* Remember where the sequence ended before gimplification...  */
  gimple_seq_node tail_before = gimple_seq_last (*seq_p);

  gimplify_expr (stmt_p, seq_p, NULL, is_gimple_stmt, fb_none);

  /* ...and report whether gimplification appended anything.  */
  return gimple_seq_last (*seq_p) != tail_before;
}
/* Add FIRSTPRIVATE entries for DECL to CTX and all surrounding OpenMP
   parallels.  If entries already exist, force them to be some flavor of
   private.  If there is no enclosing parallel, do nothing.  */

void
omp_firstprivatize_variable (struct gimplify_omp_ctx *ctx, tree decl)
{
  if (decl == NULL || !DECL_P (decl) || ctx->region_type == ORT_NONE)
    return;

  /* Walk outwards through the enclosing OMP contexts.  */
  for (; ctx; ctx = ctx->outer_context)
    {
      splay_tree_node entry
	= splay_tree_lookup (ctx->variables, (splay_tree_key) decl);

      if (entry != NULL)
	{
	  /* Already known here: demote SHARED to FIRSTPRIVATE (keeping
	     the SEEN bit) and restrict MAP to to-only; any other class
	     is already private enough, so stop.  */
	  if (entry->value & GOVD_SHARED)
	    entry->value = GOVD_FIRSTPRIVATE | (entry->value & GOVD_SEEN);
	  else if (entry->value & GOVD_MAP)
	    entry->value |= GOVD_MAP_TO_ONLY;
	  else
	    return;
	}
      else if ((ctx->region_type & ORT_TARGET) != 0)
	{
	  /* On target regions, scalars may be firstprivatized directly;
	     otherwise use a to-only mapping.  */
	  if (ctx->target_map_scalars_firstprivate)
	    omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE);
	  else
	    omp_add_variable (ctx, decl, GOVD_MAP | GOVD_MAP_TO_ONLY);
	}
      else if (ctx->region_type != ORT_WORKSHARE
	       && ctx->region_type != ORT_SIMD
	       && ctx->region_type != ORT_ACC
	       && !(ctx->region_type & ORT_TARGET_DATA))
	omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE);
    }
}
/* Similarly for each of the type sizes of TYPE.  */

static void
omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *ctx, tree type)
{
  if (type == NULL || type == error_mark_node)
    return;
  type = TYPE_MAIN_VARIANT (type);

  /* Each type is processed at most once; this also terminates
     recursion on self-referential types.  */
  if (ctx->privatized_types->add (type))
    return;

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
      /* Range bounds may be variable (e.g. Fortran/Ada); firstprivatize
	 the decls holding them.  */
      omp_firstprivatize_variable (ctx, TYPE_MIN_VALUE (type));
      omp_firstprivatize_variable (ctx, TYPE_MAX_VALUE (type));
      break;

    case ARRAY_TYPE:
      /* Recurse on the element type and the index domain.  */
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
      omp_firstprivatize_type_sizes (ctx, TYPE_DOMAIN (type));
      break;

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	tree field;
	for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	  if (TREE_CODE (field) == FIELD_DECL)
	    {
	      omp_firstprivatize_variable (ctx, DECL_FIELD_OFFSET (field));
	      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (field));
	    }
      }
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
      break;

    default:
      break;
    }

  /* Finally the overall size expressions of TYPE itself.  */
  omp_firstprivatize_variable (ctx, TYPE_SIZE (type));
  omp_firstprivatize_variable (ctx, TYPE_SIZE_UNIT (type));

  /* Give the frontend a chance to handle language-specific size
     fields.  */
  lang_hooks.types.omp_firstprivatize_type_sizes (ctx, type);
}
/* Add an entry for DECL in the OMP context CTX with FLAGS.  */

static void
omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
{
  splay_tree_node n;
  unsigned int nflags;
  tree t;

  if (error_operand_p (decl) || ctx->region_type == ORT_NONE)
    return;

  /* Never elide decls whose type has TREE_ADDRESSABLE set.  This means
     there are constructors involved somewhere.  Exception is a shared clause,
     there is nothing privatized in that case.  */
  if ((flags & GOVD_SHARED) == 0
      && (TREE_ADDRESSABLE (TREE_TYPE (decl))
	  || TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl))))
    flags |= GOVD_SEEN;

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL && (n->value & GOVD_DATA_SHARE_CLASS) != 0)
    {
      /* We shouldn't be re-adding the decl with the same data
	 sharing class.  */
      gcc_assert ((n->value & GOVD_DATA_SHARE_CLASS & flags) == 0);
      nflags = n->value | flags;
      /* The only combination of data sharing classes we should see is
	 FIRSTPRIVATE and LASTPRIVATE.  However, OpenACC permits
	 reduction variables to be used in data sharing clauses.  */
      gcc_assert ((ctx->region_type & ORT_ACC) != 0
		  || ((nflags & GOVD_DATA_SHARE_CLASS)
		      == (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE))
		  || (flags & GOVD_DATA_SHARE_CLASS) == 0);
      n->value = nflags;
      return;
    }

  /* When adding a variable-sized variable, we have to handle all sorts
     of additional bits of data: the pointer replacement variable, and
     the parameters of the type.  */
  if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    {
      /* Add the pointer replacement variable as PRIVATE if the variable
	 replacement is private, else FIRSTPRIVATE since we'll need the
	 address of the original variable either for SHARED, or for the
	 copy into or out of the context.  */
      if (!(flags & GOVD_LOCAL))
	{
	  if (flags & GOVD_MAP)
	    nflags = GOVD_MAP | GOVD_MAP_TO_ONLY | GOVD_EXPLICIT;
	  else if (flags & GOVD_PRIVATE)
	    nflags = GOVD_PRIVATE;
	  else if ((ctx->region_type & (ORT_TARGET | ORT_TARGET_DATA)) != 0
		   && (flags & GOVD_FIRSTPRIVATE))
	    nflags = GOVD_PRIVATE | GOVD_EXPLICIT;
	  else
	    nflags = GOVD_FIRSTPRIVATE;
	  nflags |= flags & GOVD_SEEN;
	  /* A VLA decl's DECL_VALUE_EXPR is *ptr; recurse on PTR, the
	     pointer replacement variable.  */
	  t = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (t) == INDIRECT_REF);
	  t = TREE_OPERAND (t, 0);
	  gcc_assert (DECL_P (t));
	  omp_add_variable (ctx, t, nflags);
	}

      /* Add all of the variable and type parameters (which should have
	 been gimplified to a formal temporary) as FIRSTPRIVATE.  */
      omp_firstprivatize_variable (ctx, DECL_SIZE_UNIT (decl));
      omp_firstprivatize_variable (ctx, DECL_SIZE (decl));
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* The variable-sized variable itself is never SHARED, only some form
	 of PRIVATE.  The sharing would take place via the pointer variable
	 which we remapped above.  */
      if (flags & GOVD_SHARED)
	flags = GOVD_SHARED | GOVD_DEBUG_PRIVATE
		| (flags & (GOVD_SEEN | GOVD_EXPLICIT));

      /* We're going to make use of the TYPE_SIZE_UNIT at least in the
	 alloca statement we generate for the variable, so make sure it
	 is available.  This isn't automatically needed for the SHARED
	 case, since we won't be allocating local storage then.
	 For local variables TYPE_SIZE_UNIT might not be gimplified yet,
	 in this case omp_notice_variable will be called later
	 on when it is gimplified.  */
      else if (! (flags & (GOVD_LOCAL | GOVD_MAP))
	       && DECL_P (TYPE_SIZE_UNIT (TREE_TYPE (decl))))
	omp_notice_variable (ctx, TYPE_SIZE_UNIT (TREE_TYPE (decl)), true);
    }
  else if ((flags & (GOVD_MAP | GOVD_LOCAL)) == 0
	   && lang_hooks.decls.omp_privatize_by_reference (decl))
    {
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* Similar to the direct variable sized case above, we'll need the
	 size of references being privatized.  */
      if ((flags & GOVD_SHARED) == 0)
	{
	  t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
	  if (DECL_P (t))
	    omp_notice_variable (ctx, t, true);
	}
    }

  if (n != NULL)
    n->value |= flags;
  else
    splay_tree_insert (ctx->variables, (splay_tree_key)decl, flags);

  /* For reductions clauses in OpenACC loop directives, by default create a
     copy clause on the enclosing parallel construct for carrying back the
     results.  */
  if (ctx->region_type == ORT_ACC && (flags & GOVD_REDUCTION))
    {
      struct gimplify_omp_ctx *outer_ctx = ctx->outer_context;
      while (outer_ctx)
	{
	  n = splay_tree_lookup (outer_ctx->variables, (splay_tree_key)decl);
	  if (n != NULL)
	    {
	      /* Ignore local variables and explicitly declared clauses.  */
	      if (n->value & (GOVD_LOCAL | GOVD_EXPLICIT))
		break;
	      else if (outer_ctx->region_type == ORT_ACC_KERNELS)
		{
		  /* According to the OpenACC spec, such a reduction variable
		     should already have a copy map on a kernels construct,
		     verify that here.  */
		  gcc_assert (!(n->value & GOVD_FIRSTPRIVATE)
			      && (n->value & GOVD_MAP));
		}
	      else if (outer_ctx->region_type == ORT_ACC_PARALLEL)
		{
		  /* Remove firstprivate and make it a copy map.  */
		  n->value &= ~GOVD_FIRSTPRIVATE;
		  n->value |= GOVD_MAP;
		}
	    }
	  else if (outer_ctx->region_type == ORT_ACC_PARALLEL)
	    {
	      splay_tree_insert (outer_ctx->variables, (splay_tree_key)decl,
				 GOVD_MAP | GOVD_SEEN);
	      break;
	    }
	  outer_ctx = outer_ctx->outer_context;
	}
    }
}
/* Notice a threadprivate variable DECL used in OMP context CTX.
   This just prints out diagnostics about threadprivate variable uses
   in untied tasks and in target regions.  If DECL2 is non-NULL, prevent
   this warning on that variable.  Always returns false (threadprivate
   variables are never remapped).  */

static bool
omp_notice_threadprivate_variable (struct gimplify_omp_ctx *ctx, tree decl,
				   tree decl2)
{
  splay_tree_node n;
  struct gimplify_omp_ctx *octx;

  /* Diagnose uses inside any enclosing target region.  */
  for (octx = ctx; octx; octx = octx->outer_context)
    if ((octx->region_type & ORT_TARGET) != 0)
      {
	n = splay_tree_lookup (octx->variables, (splay_tree_key)decl);
	if (n == NULL)
	  {
	    error ("threadprivate variable %qE used in target region",
		   DECL_NAME (decl));
	    error_at (octx->location, "enclosing target region");
	    /* Insert a dummy entry so the error is reported only once
	       per region.  */
	    splay_tree_insert (octx->variables, (splay_tree_key)decl, 0);
	  }
	if (decl2)
	  splay_tree_insert (octx->variables, (splay_tree_key)decl2, 0);
      }

  if (ctx->region_type != ORT_UNTIED_TASK)
    return false;

  /* Same once-per-context diagnostic for untied tasks.  */
  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n == NULL)
    {
      error ("threadprivate variable %qE used in untied task",
	     DECL_NAME (decl));
      error_at (ctx->location, "enclosing task");
      splay_tree_insert (ctx->variables, (splay_tree_key)decl, 0);
    }
  if (decl2)
    splay_tree_insert (ctx->variables, (splay_tree_key)decl2, 0);
  return false;
}
/* Return true if global var DECL is device resident.  */

static bool
device_resident_p (tree decl)
{
  tree attr = lookup_attribute ("oacc declare target", DECL_ATTRIBUTES (decl));

  if (attr == NULL_TREE)
    return false;

  /* Scan the clause chain hanging off the attribute for a
     GOMP_MAP_DEVICE_RESIDENT mapping.  */
  for (tree node = TREE_VALUE (attr); node; node = TREE_PURPOSE (node))
    {
      tree clause = TREE_VALUE (node);

      if (OMP_CLAUSE_MAP_KIND (clause) == GOMP_MAP_DEVICE_RESIDENT)
	return true;
    }

  return false;
}
/* Return true if DECL has an ACC DECLARE attribute.  */

static bool
is_oacc_declared (tree decl)
{
  tree t;

  /* For a MEM_REF, look at the attributes of the base object.  */
  if (TREE_CODE (decl) == MEM_REF)
    t = TREE_OPERAND (decl, 0);
  else
    t = decl;

  return (lookup_attribute ("oacc declare target", DECL_ATTRIBUTES (t))
	  != NULL_TREE);
}
/* Determine outer default flags for DECL mentioned in an OMP region
   but not declared in an enclosing clause.  Returns FLAGS augmented
   with the applicable GOVD_* data-sharing bits.

   ??? Some compiler-generated variables (like SAVE_EXPRs) could be
   remapped firstprivate instead of shared.  To some extent this is
   addressed in omp_firstprivatize_type_sizes, but not
   effectively.  */

static unsigned
omp_default_clause (struct gimplify_omp_ctx *ctx, tree decl,
		    bool in_code, unsigned flags)
{
  enum omp_clause_default_kind default_kind = ctx->default_kind;
  enum omp_clause_default_kind kind;

  /* Language-predetermined sharing overrides the region's default
     clause.  */
  kind = lang_hooks.decls.omp_predetermined_sharing (decl);
  if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    default_kind = kind;

  switch (default_kind)
    {
    case OMP_CLAUSE_DEFAULT_NONE:
      {
	const char *rtype;

	if (ctx->region_type & ORT_PARALLEL)
	  rtype = "parallel";
	else if (ctx->region_type & ORT_TASK)
	  rtype = "task";
	else if (ctx->region_type & ORT_TEAMS)
	  rtype = "teams";
	else
	  gcc_unreachable ();

	error ("%qE not specified in enclosing %qs",
	       DECL_NAME (lang_hooks.decls.omp_report_decl (decl)), rtype);
	error_at (ctx->location, "enclosing %qs", rtype);
      }
      /* FALLTHRU */
    case OMP_CLAUSE_DEFAULT_SHARED:
      flags |= GOVD_SHARED;
      break;
    case OMP_CLAUSE_DEFAULT_PRIVATE:
      flags |= GOVD_PRIVATE;
      break;
    case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
      flags |= GOVD_FIRSTPRIVATE;
      break;
    case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
      /* decl will be either GOVD_FIRSTPRIVATE or GOVD_SHARED.  */
      gcc_assert ((ctx->region_type & ORT_TASK) != 0);
      if (struct gimplify_omp_ctx *octx = ctx->outer_context)
	{
	  /* Implement the OpenMP task default rules: search outward for
	     the first context that determines the sharing.  */
	  omp_notice_variable (octx, decl, in_code);
	  for (; octx; octx = octx->outer_context)
	    {
	      splay_tree_node n2;

	      n2 = splay_tree_lookup (octx->variables, (splay_tree_key) decl);
	      /* Target contexts without a real data-sharing class don't
		 determine anything; keep looking outward.  */
	      if ((octx->region_type & (ORT_TARGET_DATA | ORT_TARGET)) != 0
		  && (n2 == NULL || (n2->value & GOVD_DATA_SHARE_CLASS) == 0))
		continue;
	      if (n2 && (n2->value & GOVD_DATA_SHARE_CLASS) != GOVD_SHARED)
		{
		  flags |= GOVD_FIRSTPRIVATE;
		  goto found_outer;
		}
	      if ((octx->region_type & (ORT_PARALLEL | ORT_TEAMS)) != 0)
		{
		  flags |= GOVD_SHARED;
		  goto found_outer;
		}
	    }
	}

      /* No enclosing context decided; function-local variables are
	 firstprivate, globals are shared.  */
      if (TREE_CODE (decl) == PARM_DECL
	  || (!is_global_var (decl)
	      && DECL_CONTEXT (decl) == current_function_decl))
	flags |= GOVD_FIRSTPRIVATE;
      else
	flags |= GOVD_SHARED;
    found_outer:
      break;

    default:
      gcc_unreachable ();
    }

  return flags;
}
/* Determine outer default flags for DECL mentioned in an OACC region
   but not declared in an enclosing clause.  Returns FLAGS augmented
   with the applicable GOVD_* bits, diagnosing missing clauses under
   default(none).  */

static unsigned
oacc_default_clause (struct gimplify_omp_ctx *ctx, tree decl, unsigned flags)
{
  const char *rkind;
  bool on_device = false;
  bool declared = is_oacc_declared (decl);
  tree type = TREE_TYPE (decl);

  /* For by-reference privatization the relevant type is the referenced
     one.  */
  if (lang_hooks.decls.omp_privatize_by_reference (decl))
    type = TREE_TYPE (type);

  if ((ctx->region_type & (ORT_ACC_PARALLEL | ORT_ACC_KERNELS)) != 0
      && is_global_var (decl)
      && device_resident_p (decl))
    {
      on_device = true;
      flags |= GOVD_MAP_TO_ONLY;
    }

  switch (ctx->region_type)
    {
    default:
      gcc_unreachable ();

    case ORT_ACC_KERNELS:
      /* Scalars are default 'copy' under kernels, non-scalars are default
	 'present_or_copy'.  */
      flags |= GOVD_MAP;
      if (!AGGREGATE_TYPE_P (type))
	flags |= GOVD_MAP_FORCE;

      rkind = "kernels";
      break;

    case ORT_ACC_PARALLEL:
      {
	if (on_device || AGGREGATE_TYPE_P (type) || declared)
	  /* Aggregates default to 'present_or_copy'.  */
	  flags |= GOVD_MAP;
	else
	  /* Scalars default to 'firstprivate'.  */
	  flags |= GOVD_FIRSTPRIVATE;
	rkind = "parallel";
      }
      break;
    }

  if (DECL_ARTIFICIAL (decl))
    ; /* We can get compiler-generated decls, and should not complain
	 about them.  */
  else if (ctx->default_kind == OMP_CLAUSE_DEFAULT_NONE)
    {
      error ("%qE not specified in enclosing OpenACC %qs construct",
	     DECL_NAME (lang_hooks.decls.omp_report_decl (decl)), rkind);
      inform (ctx->location, "enclosing OpenACC %qs construct", rkind);
    }
  else
    /* Only default(none) and default(shared) are meaningful here.  */
    gcc_checking_assert (ctx->default_kind == OMP_CLAUSE_DEFAULT_SHARED);

  return flags;
}
/* Record the fact that DECL was used within the OMP context CTX.
   IN_CODE is true when real code uses DECL, and false when we should
   merely emit default(none) errors.  Return true if DECL is going to
   be remapped and thus DECL shouldn't be gimplified into its
   DECL_VALUE_EXPR (if any).  */

static bool
omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
{
  splay_tree_node n;
  unsigned flags = in_code ? GOVD_SEEN : 0;
  bool ret = false, shared;

  if (error_operand_p (decl))
    return false;

  if (ctx->region_type == ORT_NONE)
    return lang_hooks.decls.omp_disregard_value_expr (decl, false);

  if (is_global_var (decl))
    {
      /* Threadprivate variables are predetermined.  */
      if (DECL_THREAD_LOCAL_P (decl))
	return omp_notice_threadprivate_variable (ctx, decl, NULL_TREE);

      /* Emulated TLS: the value expr's base may be the real TLS var.  */
      if (DECL_HAS_VALUE_EXPR_P (decl))
	{
	  tree value = get_base_address (DECL_VALUE_EXPR (decl));

	  if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value))
	    return omp_notice_threadprivate_variable (ctx, decl, value);
	}

      /* In an OpenACC 'routine' function, globals must carry a
	 'declare' directive, and 'link' clauses are not allowed.  */
      if (gimplify_omp_ctxp->outer_context == NULL
	  && VAR_P (decl)
	  && oacc_get_fn_attrib (current_function_decl))
	{
	  location_t loc = DECL_SOURCE_LOCATION (decl);

	  if (lookup_attribute ("omp declare target link",
				DECL_ATTRIBUTES (decl)))
	    {
	      error_at (loc,
			"%qE with %<link%> clause used in %<routine%> function",
			DECL_NAME (decl));
	      return false;
	    }
	  else if (!lookup_attribute ("omp declare target",
				      DECL_ATTRIBUTES (decl)))
	    {
	      error_at (loc,
			"%qE requires a %<declare%> directive for use "
			"in a %<routine%> function", DECL_NAME (decl));
	      return false;
	    }
	}
    }

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if ((ctx->region_type & ORT_TARGET) != 0)
    {
      ret = lang_hooks.decls.omp_disregard_value_expr (decl, true);
      if (n == NULL)
	{
	  unsigned nflags = flags;
	  if (ctx->target_map_pointers_as_0len_arrays
	      || ctx->target_map_scalars_firstprivate)
	    {
	      bool is_declare_target = false;
	      bool is_scalar = false;
	      if (is_global_var (decl)
		  && varpool_node::get_create (decl)->offloadable)
		{
		  /* A global is "declare target" if no outer context
		     gives it a non-shared data-sharing class.  */
		  struct gimplify_omp_ctx *octx;
		  for (octx = ctx->outer_context;
		       octx; octx = octx->outer_context)
		    {
		      n = splay_tree_lookup (octx->variables,
					     (splay_tree_key)decl);
		      if (n
			  && (n->value & GOVD_DATA_SHARE_CLASS) != GOVD_SHARED
			  && (n->value & GOVD_DATA_SHARE_CLASS) != 0)
			break;
		    }
		  is_declare_target = octx == NULL;
		}
	      if (!is_declare_target && ctx->target_map_scalars_firstprivate)
		is_scalar = lang_hooks.decls.omp_scalar_p (decl);
	      if (is_declare_target)
		;
	      else if (ctx->target_map_pointers_as_0len_arrays
		       && (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
			   || (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE
			       && TREE_CODE (TREE_TYPE (TREE_TYPE (decl)))
				  == POINTER_TYPE)))
		nflags |= GOVD_MAP | GOVD_MAP_0LEN_ARRAY;
	      else if (is_scalar)
		nflags |= GOVD_FIRSTPRIVATE;
	    }

	  struct gimplify_omp_ctx *octx = ctx->outer_context;
	  if ((ctx->region_type & ORT_ACC) && octx)
	    {
	      /* Look in outer OpenACC contexts, to see if there's a
		 data attribute for this variable.  */
	      omp_notice_variable (octx, decl, in_code);

	      for (; octx; octx = octx->outer_context)
		{
		  if (!(octx->region_type & (ORT_TARGET_DATA | ORT_TARGET)))
		    break;
		  splay_tree_node n2
		    = splay_tree_lookup (octx->variables,
					 (splay_tree_key) decl);
		  if (n2)
		    {
		      if (octx->region_type == ORT_ACC_HOST_DATA)
		        error ("variable %qE declared in enclosing "
			       "%<host_data%> region", DECL_NAME (decl));
		      nflags |= GOVD_MAP;
		      if (octx->region_type == ORT_ACC_DATA
			  && (n2->value & GOVD_MAP_0LEN_ARRAY))
			nflags |= GOVD_MAP_0LEN_ARRAY;
		      goto found_outer;
		    }
		}
	    }

	  {
	    tree type = TREE_TYPE (decl);

	    /* nflags == flags means no more specific mapping was chosen
	       above; decide the default here.  */
	    if (nflags == flags
		&& gimplify_omp_ctxp->target_firstprivatize_array_bases
		&& lang_hooks.decls.omp_privatize_by_reference (decl))
	      type = TREE_TYPE (type);
	    if (nflags == flags
		&& !lang_hooks.types.omp_mappable_type (type))
	      {
		error ("%qD referenced in target region does not have "
		       "a mappable type", decl);
		nflags |= GOVD_MAP | GOVD_EXPLICIT;
	      }
	    else if (nflags == flags)
	      {
		if ((ctx->region_type & ORT_ACC) != 0)
		  nflags = oacc_default_clause (ctx, decl, flags);
		else
		  nflags |= GOVD_MAP;
	      }
	  }
	found_outer:
	  omp_add_variable (ctx, decl, nflags);
	}
      else
	{
	  /* If nothing changed, there's nothing left to do.  */
	  if ((n->value & flags) == flags)
	    return ret;
	  flags |= n->value;
	  n->value = flags;
	}
      goto do_outer;
    }

  if (n == NULL)
    {
      /* Worksharing-ish regions don't set defaults themselves; defer
	 to the enclosing context.  */
      if (ctx->region_type == ORT_WORKSHARE
	  || ctx->region_type == ORT_SIMD
	  || ctx->region_type == ORT_ACC
	  || (ctx->region_type & ORT_TARGET_DATA) != 0)
	goto do_outer;

      flags = omp_default_clause (ctx, decl, in_code, flags);

      if ((flags & GOVD_PRIVATE)
	  && lang_hooks.decls.omp_private_outer_ref (decl))
	flags |= GOVD_PRIVATE_OUTER_REF;

      omp_add_variable (ctx, decl, flags);

      shared = (flags & GOVD_SHARED) != 0;
      ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
      goto do_outer;
    }

  /* First real (in_code) sighting of the variable in this context:
     also mark the decls backing its variable size as seen.  */
  if ((n->value & (GOVD_SEEN | GOVD_LOCAL)) == 0
      && (flags & (GOVD_SEEN | GOVD_LOCAL)) == GOVD_SEEN
      && DECL_SIZE (decl))
    {
      if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	{
	  splay_tree_node n2;
	  tree t = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (t) == INDIRECT_REF);
	  t = TREE_OPERAND (t, 0);
	  gcc_assert (DECL_P (t));
	  n2 = splay_tree_lookup (ctx->variables, (splay_tree_key) t);
	  n2->value |= GOVD_SEEN;
	}
      else if (lang_hooks.decls.omp_privatize_by_reference (decl)
	       && TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)))
	       && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))))
		   != INTEGER_CST))
	{
	  splay_tree_node n2;
	  tree t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
	  gcc_assert (DECL_P (t));
	  n2 = splay_tree_lookup (ctx->variables, (splay_tree_key) t);
	  if (n2)
	    omp_notice_variable (ctx, t, true);
	}
    }

  shared = ((flags | n->value) & GOVD_SHARED) != 0;
  ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);

  /* If nothing changed, there's nothing left to do.  */
  if ((n->value & flags) == flags)
    return ret;
  flags |= n->value;
  n->value = flags;

 do_outer:
  /* If the variable is private in the current context, then we don't
     need to propagate anything to an outer context.  */
  if ((flags & GOVD_PRIVATE) && !(flags & GOVD_PRIVATE_OUTER_REF))
    return ret;
  if ((flags & (GOVD_LINEAR | GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
      == (GOVD_LINEAR | GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
    return ret;
  if ((flags & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
		| GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
      == (GOVD_LASTPRIVATE | GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
    return ret;
  if (ctx->outer_context
      && omp_notice_variable (ctx->outer_context, decl, in_code))
    return true;
  return ret;
}
/* Verify that DECL is private within CTX.  If there's specific information
   to the contrary in the innermost scope, generate an error.  SIMD is
   0 for ordinary loops, non-zero for simd loops (1 for plain simd,
   2 for combined for-simd).  Returns true if CTX is the innermost
   relevant context (or a combined parallel directly enclosing it).  */

static bool
omp_is_private (struct gimplify_omp_ctx *ctx, tree decl, int simd)
{
  splay_tree_node n;

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      if (n->value & GOVD_SHARED)
	{
	  if (ctx == gimplify_omp_ctxp)
	    {
	      if (simd)
		error ("iteration variable %qE is predetermined linear",
		       DECL_NAME (decl));
	      else
		error ("iteration variable %qE should be private",
		       DECL_NAME (decl));
	      /* Force it private so the error is not repeated.  */
	      n->value = GOVD_PRIVATE;
	      return true;
	    }
	  else
	    return false;
	}
      else if ((n->value & GOVD_EXPLICIT) != 0
	       && (ctx == gimplify_omp_ctxp
		   || (ctx->region_type == ORT_COMBINED_PARALLEL
		       && gimplify_omp_ctxp->outer_context == ctx)))
	{
	  /* Diagnose explicit clauses incompatible with an iteration
	     variable; which clauses are allowed depends on SIMD.  */
	  if ((n->value & GOVD_FIRSTPRIVATE) != 0)
	    error ("iteration variable %qE should not be firstprivate",
		   DECL_NAME (decl));
	  else if ((n->value & GOVD_REDUCTION) != 0)
	    error ("iteration variable %qE should not be reduction",
		   DECL_NAME (decl));
	  else if (simd == 0 && (n->value & GOVD_LINEAR) != 0)
	    error ("iteration variable %qE should not be linear",
		   DECL_NAME (decl));
	  else if (simd == 1 && (n->value & GOVD_LASTPRIVATE) != 0)
	    error ("iteration variable %qE should not be lastprivate",
		   DECL_NAME (decl));
	  else if (simd && (n->value & GOVD_PRIVATE) != 0)
	    error ("iteration variable %qE should not be private",
		   DECL_NAME (decl));
	  else if (simd == 2 && (n->value & GOVD_LINEAR) != 0)
	    error ("iteration variable %qE is predetermined linear",
		   DECL_NAME (decl));
	}
      return (ctx == gimplify_omp_ctxp
	      || (ctx->region_type == ORT_COMBINED_PARALLEL
		  && gimplify_omp_ctxp->outer_context == ctx));
    }

  /* Not recorded here: only keep searching outward through contexts
     that don't themselves establish a data-sharing default.  */
  if (ctx->region_type != ORT_WORKSHARE
      && ctx->region_type != ORT_SIMD
      && ctx->region_type != ORT_ACC)
    return false;
  else if (ctx->outer_context)
    return omp_is_private (ctx->outer_context, decl, simd);
  return false;
}
/* Return true if DECL is private within a parallel region
   that binds to the current construct's context or in parallel
   region's REDUCTION clause.  COPYPRIVATE selects the more pessimistic
   assumption for references whose privacy cannot be determined.  */

static bool
omp_check_private (struct gimplify_omp_ctx *ctx, tree decl, bool copyprivate)
{
  splay_tree_node n;

  do
    {
      ctx = ctx->outer_context;
      if (ctx == NULL)
	{
	  /* Ran out of contexts without finding an entry.  */
	  if (is_global_var (decl))
	    return false;

	  /* References might be private, but might be shared too,
	     when checking for copyprivate, assume they might be
	     private, otherwise assume they might be shared.  */
	  if (copyprivate)
	    return true;

	  if (lang_hooks.decls.omp_privatize_by_reference (decl))
	    return false;

	  /* Treat C++ privatized non-static data members outside
	     of the privatization the same.  */
	  if (omp_member_access_dummy_var (decl))
	    return false;

	  return true;
	}

      n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);

      /* Target contexts that don't assign a data-sharing class don't
	 settle the question; keep walking outward.  */
      if ((ctx->region_type & (ORT_TARGET | ORT_TARGET_DATA)) != 0
	  && (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0))
	continue;

      if (n != NULL)
	{
	  if ((n->value & GOVD_LOCAL) != 0
	      && omp_member_access_dummy_var (decl))
	    return false;
	  return (n->value & GOVD_SHARED) == 0;
	}
    }
  while (ctx->region_type == ORT_WORKSHARE
	 || ctx->region_type == ORT_SIMD
	 || ctx->region_type == ORT_ACC);
  return false;
}
/* Callback for walk_tree to find a DECL_EXPR for the given DECL.  */

static tree
find_decl_expr (tree *tp, int *walk_subtrees, void *data)
{
  tree node = *tp;

  /* Terminate the walk by returning the node once we hit a DECL_EXPR
     that declares exactly the decl passed in DATA.  */
  if (TREE_CODE (node) == DECL_EXPR && DECL_EXPR_DECL (node) == (tree) data)
    return node;

  /* Types and declarations cannot contain the DECL_EXPR we are after,
     so don't descend into them.  */
  if (IS_TYPE_OR_DECL_P (node))
    *walk_subtrees = 0;

  return NULL_TREE;
}
/* Scan the OMP clauses in *LIST_P, installing mappings into a new
and previous omp contexts. */
static void
gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
enum omp_region_type region_type,
enum tree_code code)
{
struct gimplify_omp_ctx *ctx, *outer_ctx;
tree c;
hash_map<tree, tree> *struct_map_to_clause = NULL;
tree *prev_list_p = NULL;
ctx = new_omp_context (region_type);
outer_ctx = ctx->outer_context;
if (code == OMP_TARGET)
{
if (!lang_GNU_Fortran ())
ctx->target_map_pointers_as_0len_arrays = true;
ctx->target_map_scalars_firstprivate = true;
}
if (!lang_GNU_Fortran ())
switch (code)
{
case OMP_TARGET:
case OMP_TARGET_DATA:
case OMP_TARGET_ENTER_DATA:
case OMP_TARGET_EXIT_DATA:
case OACC_DECLARE:
case OACC_HOST_DATA:
ctx->target_firstprivatize_array_bases = true;
default:
break;
}
while ((c = *list_p) != NULL)
{
bool remove = false;
bool notice_outer = true;
const char *check_non_private = NULL;
unsigned int flags;
tree decl;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
flags = GOVD_PRIVATE | GOVD_EXPLICIT;
if (lang_hooks.decls.omp_private_outer_ref (OMP_CLAUSE_DECL (c)))
{
flags |= GOVD_PRIVATE_OUTER_REF;
OMP_CLAUSE_PRIVATE_OUTER_REF (c) = 1;
}
else
notice_outer = false;
goto do_add;
case OMP_CLAUSE_SHARED:
flags = GOVD_SHARED | GOVD_EXPLICIT;
goto do_add;
case OMP_CLAUSE_FIRSTPRIVATE:
flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
check_non_private = "firstprivate";
goto do_add;
case OMP_CLAUSE_LASTPRIVATE:
flags = GOVD_LASTPRIVATE | GOVD_SEEN | GOVD_EXPLICIT;
check_non_private = "lastprivate";
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
goto do_add;
else if (outer_ctx
&& (outer_ctx->region_type == ORT_COMBINED_PARALLEL
|| outer_ctx->region_type == ORT_COMBINED_TEAMS)
&& splay_tree_lookup (outer_ctx->variables,
(splay_tree_key) decl) == NULL)
{
omp_add_variable (outer_ctx, decl, GOVD_SHARED | GOVD_SEEN);
if (outer_ctx->outer_context)
omp_notice_variable (outer_ctx->outer_context, decl, true);
}
else if (outer_ctx
&& (outer_ctx->region_type & ORT_TASK) != 0
&& outer_ctx->combined_loop
&& splay_tree_lookup (outer_ctx->variables,
(splay_tree_key) decl) == NULL)
{
omp_add_variable (outer_ctx, decl, GOVD_LASTPRIVATE | GOVD_SEEN);
if (outer_ctx->outer_context)
omp_notice_variable (outer_ctx->outer_context, decl, true);
}
else if (outer_ctx
&& (outer_ctx->region_type == ORT_WORKSHARE
|| outer_ctx->region_type == ORT_ACC)
&& outer_ctx->combined_loop
&& splay_tree_lookup (outer_ctx->variables,
(splay_tree_key) decl) == NULL
&& !omp_check_private (outer_ctx, decl, false))
{
omp_add_variable (outer_ctx, decl, GOVD_LASTPRIVATE | GOVD_SEEN);
if (outer_ctx->outer_context
&& (outer_ctx->outer_context->region_type
== ORT_COMBINED_PARALLEL)
&& splay_tree_lookup (outer_ctx->outer_context->variables,
(splay_tree_key) decl) == NULL)
{
struct gimplify_omp_ctx *octx = outer_ctx->outer_context;
omp_add_variable (octx, decl, GOVD_SHARED | GOVD_SEEN);
if (octx->outer_context)
{
octx = octx->outer_context;
if (octx->region_type == ORT_WORKSHARE
&& octx->combined_loop
&& splay_tree_lookup (octx->variables,
(splay_tree_key) decl) == NULL
&& !omp_check_private (octx, decl, false))
{
omp_add_variable (octx, decl,
GOVD_LASTPRIVATE | GOVD_SEEN);
octx = octx->outer_context;
if (octx
&& octx->region_type == ORT_COMBINED_TEAMS
&& (splay_tree_lookup (octx->variables,
(splay_tree_key) decl)
== NULL))
{
omp_add_variable (octx, decl,
GOVD_SHARED | GOVD_SEEN);
octx = octx->outer_context;
}
}
if (octx)
omp_notice_variable (octx, decl, true);
}
}
else if (outer_ctx->outer_context)
omp_notice_variable (outer_ctx->outer_context, decl, true);
}
goto do_add;
case OMP_CLAUSE_REDUCTION:
flags = GOVD_REDUCTION | GOVD_SEEN | GOVD_EXPLICIT;
/* OpenACC permits reductions on private variables. */
if (!(region_type & ORT_ACC))
check_non_private = "reduction";
decl = OMP_CLAUSE_DECL (c);
if (TREE_CODE (decl) == MEM_REF)
{
tree type = TREE_TYPE (decl);
if (gimplify_expr (&TYPE_MAX_VALUE (TYPE_DOMAIN (type)), pre_p,
NULL, is_gimple_val, fb_rvalue, false)
== GS_ERROR)
{
remove = true;
break;
}
tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
if (DECL_P (v))
{
omp_firstprivatize_variable (ctx, v);
omp_notice_variable (ctx, v, true);
}
decl = TREE_OPERAND (decl, 0);
if (TREE_CODE (decl) == POINTER_PLUS_EXPR)
{
if (gimplify_expr (&TREE_OPERAND (decl, 1), pre_p,
NULL, is_gimple_val, fb_rvalue, false)
== GS_ERROR)
{
remove = true;
break;
}
v = TREE_OPERAND (decl, 1);
if (DECL_P (v))
{
omp_firstprivatize_variable (ctx, v);
omp_notice_variable (ctx, v, true);
}
decl = TREE_OPERAND (decl, 0);
}
if (TREE_CODE (decl) == ADDR_EXPR
|| TREE_CODE (decl) == INDIRECT_REF)
decl = TREE_OPERAND (decl, 0);
}
goto do_add_decl;
case OMP_CLAUSE_LINEAR:
if (gimplify_expr (&OMP_CLAUSE_LINEAR_STEP (c), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
else
{
if (code == OMP_SIMD
&& !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
{
struct gimplify_omp_ctx *octx = outer_ctx;
if (octx
&& octx->region_type == ORT_WORKSHARE
&& octx->combined_loop
&& !octx->distribute)
{
if (octx->outer_context
&& (octx->outer_context->region_type
== ORT_COMBINED_PARALLEL))
octx = octx->outer_context->outer_context;
else
octx = octx->outer_context;
}
if (octx
&& octx->region_type == ORT_WORKSHARE
&& octx->combined_loop
&& octx->distribute)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<linear%> clause for variable other than "
"loop iterator specified on construct "
"combined with %<distribute%>");
remove = true;
break;
}
}
/* For combined #pragma omp parallel for simd, need to put
lastprivate and perhaps firstprivate too on the
parallel. Similarly for #pragma omp for simd. */
struct gimplify_omp_ctx *octx = outer_ctx;
decl = NULL_TREE;
do
{
if (OMP_CLAUSE_LINEAR_NO_COPYIN (c)
&& OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
break;
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
decl = NULL_TREE;
break;
}
flags = GOVD_SEEN;
if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
flags |= GOVD_FIRSTPRIVATE;
if (!OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
flags |= GOVD_LASTPRIVATE;
if (octx
&& octx->region_type == ORT_WORKSHARE
&& octx->combined_loop)
{
if (octx->outer_context
&& (octx->outer_context->region_type
== ORT_COMBINED_PARALLEL))
octx = octx->outer_context;
else if (omp_check_private (octx, decl, false))
break;
}
else if (octx
&& (octx->region_type & ORT_TASK) != 0
&& octx->combined_loop)
;
else if (octx
&& octx->region_type == ORT_COMBINED_PARALLEL
&& ctx->region_type == ORT_WORKSHARE
&& octx == outer_ctx)
flags = GOVD_SEEN | GOVD_SHARED;
else if (octx
&& octx->region_type == ORT_COMBINED_TEAMS)
flags = GOVD_SEEN | GOVD_SHARED;
else if (octx
&& octx->region_type == ORT_COMBINED_TARGET)
{
flags &= ~GOVD_LASTPRIVATE;
if (flags == GOVD_SEEN)
break;
}
else
break;
splay_tree_node on
= splay_tree_lookup (octx->variables,
(splay_tree_key) decl);
if (on && (on->value & GOVD_DATA_SHARE_CLASS) != 0)
{
octx = NULL;
break;
}
omp_add_variable (octx, decl, flags);
if (octx->outer_context == NULL)
break;
octx = octx->outer_context;
}
while (1);
if (octx
&& decl
&& (!OMP_CLAUSE_LINEAR_NO_COPYIN (c)
|| !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
omp_notice_variable (octx, decl, true);
}
flags = GOVD_LINEAR | GOVD_EXPLICIT;
if (OMP_CLAUSE_LINEAR_NO_COPYIN (c)
&& OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
{
notice_outer = false;
flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER;
}
goto do_add;
case OMP_CLAUSE_MAP:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
remove = true;
switch (code)
{
case OMP_TARGET:
break;
case OACC_DATA:
if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
break;
/* FALLTHRU */
case OMP_TARGET_DATA:
case OMP_TARGET_ENTER_DATA:
case OMP_TARGET_EXIT_DATA:
case OACC_ENTER_DATA:
case OACC_EXIT_DATA:
case OACC_HOST_DATA:
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE))
/* For target {,enter ,exit }data only the array slice is
mapped, but not the pointer to it. */
remove = true;
break;
default:
break;
}
if (remove)
break;
if (DECL_P (decl) && outer_ctx && (region_type & ORT_ACC))
{
struct gimplify_omp_ctx *octx;
for (octx = outer_ctx; octx; octx = octx->outer_context)
{
if (octx->region_type != ORT_ACC_HOST_DATA)
break;
splay_tree_node n2
= splay_tree_lookup (octx->variables,
(splay_tree_key) decl);
if (n2)
error_at (OMP_CLAUSE_LOCATION (c), "variable %qE "
"declared in enclosing %<host_data%> region",
DECL_NAME (decl));
}
}
if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
OMP_CLAUSE_SIZE (c) = DECL_P (decl) ? DECL_SIZE_UNIT (decl)
: TYPE_SIZE_UNIT (TREE_TYPE (decl));
if (gimplify_expr (&OMP_CLAUSE_SIZE (c), pre_p,
NULL, is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
else if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE))
&& TREE_CODE (OMP_CLAUSE_SIZE (c)) != INTEGER_CST)
{
OMP_CLAUSE_SIZE (c)
= get_initialized_tmp_var (OMP_CLAUSE_SIZE (c), pre_p, NULL,
false);
omp_add_variable (ctx, OMP_CLAUSE_SIZE (c),
GOVD_FIRSTPRIVATE | GOVD_SEEN);
}
if (!DECL_P (decl))
{
tree d = decl, *pd;
if (TREE_CODE (d) == ARRAY_REF)
{
while (TREE_CODE (d) == ARRAY_REF)
d = TREE_OPERAND (d, 0);
if (TREE_CODE (d) == COMPONENT_REF
&& TREE_CODE (TREE_TYPE (d)) == ARRAY_TYPE)
decl = d;
}
pd = &OMP_CLAUSE_DECL (c);
if (d == decl
&& TREE_CODE (decl) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
== REFERENCE_TYPE))
{
pd = &TREE_OPERAND (decl, 0);
decl = TREE_OPERAND (decl, 0);
}
if (TREE_CODE (decl) == COMPONENT_REF)
{
while (TREE_CODE (decl) == COMPONENT_REF)
decl = TREE_OPERAND (decl, 0);
if (TREE_CODE (decl) == INDIRECT_REF
&& DECL_P (TREE_OPERAND (decl, 0))
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
== REFERENCE_TYPE))
decl = TREE_OPERAND (decl, 0);
}
if (gimplify_expr (pd, pre_p, NULL, is_gimple_lvalue, fb_lvalue)
== GS_ERROR)
{
remove = true;
break;
}
if (DECL_P (decl))
{
if (error_operand_p (decl))
{
remove = true;
break;
}
tree stype = TREE_TYPE (decl);
if (TREE_CODE (stype) == REFERENCE_TYPE)
stype = TREE_TYPE (stype);
if (TYPE_SIZE_UNIT (stype) == NULL
|| TREE_CODE (TYPE_SIZE_UNIT (stype)) != INTEGER_CST)
{
error_at (OMP_CLAUSE_LOCATION (c),
"mapping field %qE of variable length "
"structure", OMP_CLAUSE_DECL (c));
remove = true;
break;
}
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER)
{
/* Error recovery. */
if (prev_list_p == NULL)
{
remove = true;
break;
}
if (OMP_CLAUSE_CHAIN (*prev_list_p) != c)
{
tree ch = OMP_CLAUSE_CHAIN (*prev_list_p);
if (ch == NULL_TREE || OMP_CLAUSE_CHAIN (ch) != c)
{
remove = true;
break;
}
}
}
tree offset;
HOST_WIDE_INT bitsize, bitpos;
machine_mode mode;
int unsignedp, reversep, volatilep = 0;
tree base = OMP_CLAUSE_DECL (c);
while (TREE_CODE (base) == ARRAY_REF)
base = TREE_OPERAND (base, 0);
if (TREE_CODE (base) == INDIRECT_REF)
base = TREE_OPERAND (base, 0);
base = get_inner_reference (base, &bitsize, &bitpos, &offset,
&mode, &unsignedp, &reversep,
&volatilep);
tree orig_base = base;
if ((TREE_CODE (base) == INDIRECT_REF
|| (TREE_CODE (base) == MEM_REF
&& integer_zerop (TREE_OPERAND (base, 1))))
&& DECL_P (TREE_OPERAND (base, 0))
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (base, 0)))
== REFERENCE_TYPE))
base = TREE_OPERAND (base, 0);
gcc_assert (base == decl
&& (offset == NULL_TREE
|| TREE_CODE (offset) == INTEGER_CST));
splay_tree_node n
= splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
bool ptr = (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_ALWAYS_POINTER);
if (n == NULL || (n->value & GOVD_MAP) == 0)
{
tree l = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
OMP_CLAUSE_SET_MAP_KIND (l, GOMP_MAP_STRUCT);
if (orig_base != base)
OMP_CLAUSE_DECL (l) = unshare_expr (orig_base);
else
OMP_CLAUSE_DECL (l) = decl;
OMP_CLAUSE_SIZE (l) = size_int (1);
if (struct_map_to_clause == NULL)
struct_map_to_clause = new hash_map<tree, tree>;
struct_map_to_clause->put (decl, l);
if (ptr)
{
enum gomp_map_kind mkind
= code == OMP_TARGET_EXIT_DATA
? GOMP_MAP_RELEASE : GOMP_MAP_ALLOC;
tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
OMP_CLAUSE_DECL (c2)
= unshare_expr (OMP_CLAUSE_DECL (c));
OMP_CLAUSE_CHAIN (c2) = *prev_list_p;
OMP_CLAUSE_SIZE (c2)
= TYPE_SIZE_UNIT (ptr_type_node);
OMP_CLAUSE_CHAIN (l) = c2;
if (OMP_CLAUSE_CHAIN (*prev_list_p) != c)
{
tree c4 = OMP_CLAUSE_CHAIN (*prev_list_p);
tree c3
= build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
OMP_CLAUSE_SET_MAP_KIND (c3, mkind);
OMP_CLAUSE_DECL (c3)
= unshare_expr (OMP_CLAUSE_DECL (c4));
OMP_CLAUSE_SIZE (c3)
= TYPE_SIZE_UNIT (ptr_type_node);
OMP_CLAUSE_CHAIN (c3) = *prev_list_p;
OMP_CLAUSE_CHAIN (c2) = c3;
}
*prev_list_p = l;
prev_list_p = NULL;
}
else
{
OMP_CLAUSE_CHAIN (l) = c;
*list_p = l;
list_p = &OMP_CLAUSE_CHAIN (l);
}
if (orig_base != base && code == OMP_TARGET)
{
tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
enum gomp_map_kind mkind
= GOMP_MAP_FIRSTPRIVATE_REFERENCE;
OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
OMP_CLAUSE_DECL (c2) = decl;
OMP_CLAUSE_SIZE (c2) = size_zero_node;
OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (l);
OMP_CLAUSE_CHAIN (l) = c2;
}
flags = GOVD_MAP | GOVD_EXPLICIT;
if (GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c)) || ptr)
flags |= GOVD_SEEN;
goto do_add_decl;
}
else
{
tree *osc = struct_map_to_clause->get (decl);
tree *sc = NULL, *scp = NULL;
if (GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c)) || ptr)
n->value |= GOVD_SEEN;
offset_int o1, o2;
if (offset)
o1 = wi::to_offset (offset);
else
o1 = 0;
if (bitpos)
o1 = o1 + bitpos / BITS_PER_UNIT;
sc = &OMP_CLAUSE_CHAIN (*osc);
if (*sc != c
&& (OMP_CLAUSE_MAP_KIND (*sc)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE))
sc = &OMP_CLAUSE_CHAIN (*sc);
for (; *sc != c; sc = &OMP_CLAUSE_CHAIN (*sc))
if (ptr && sc == prev_list_p)
break;
else if (TREE_CODE (OMP_CLAUSE_DECL (*sc))
!= COMPONENT_REF
&& (TREE_CODE (OMP_CLAUSE_DECL (*sc))
!= INDIRECT_REF)
&& (TREE_CODE (OMP_CLAUSE_DECL (*sc))
!= ARRAY_REF))
break;
else
{
tree offset2;
HOST_WIDE_INT bitsize2, bitpos2;
base = OMP_CLAUSE_DECL (*sc);
if (TREE_CODE (base) == ARRAY_REF)
{
while (TREE_CODE (base) == ARRAY_REF)
base = TREE_OPERAND (base, 0);
if (TREE_CODE (base) != COMPONENT_REF
|| (TREE_CODE (TREE_TYPE (base))
!= ARRAY_TYPE))
break;
}
else if (TREE_CODE (base) == INDIRECT_REF
&& (TREE_CODE (TREE_OPERAND (base, 0))
== COMPONENT_REF)
&& (TREE_CODE (TREE_TYPE
(TREE_OPERAND (base, 0)))
== REFERENCE_TYPE))
base = TREE_OPERAND (base, 0);
base = get_inner_reference (base, &bitsize2,
&bitpos2, &offset2,
&mode, &unsignedp,
&reversep, &volatilep);
if ((TREE_CODE (base) == INDIRECT_REF
|| (TREE_CODE (base) == MEM_REF
&& integer_zerop (TREE_OPERAND (base,
1))))
&& DECL_P (TREE_OPERAND (base, 0))
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (base,
0)))
== REFERENCE_TYPE))
base = TREE_OPERAND (base, 0);
if (base != decl)
break;
if (scp)
continue;
gcc_assert (offset == NULL_TREE
|| TREE_CODE (offset) == INTEGER_CST);
tree d1 = OMP_CLAUSE_DECL (*sc);
tree d2 = OMP_CLAUSE_DECL (c);
while (TREE_CODE (d1) == ARRAY_REF)
d1 = TREE_OPERAND (d1, 0);
while (TREE_CODE (d2) == ARRAY_REF)
d2 = TREE_OPERAND (d2, 0);
if (TREE_CODE (d1) == INDIRECT_REF)
d1 = TREE_OPERAND (d1, 0);
if (TREE_CODE (d2) == INDIRECT_REF)
d2 = TREE_OPERAND (d2, 0);
while (TREE_CODE (d1) == COMPONENT_REF)
if (TREE_CODE (d2) == COMPONENT_REF
&& TREE_OPERAND (d1, 1)
== TREE_OPERAND (d2, 1))
{
d1 = TREE_OPERAND (d1, 0);
d2 = TREE_OPERAND (d2, 0);
}
else
break;
if (d1 == d2)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in map "
"clauses", OMP_CLAUSE_DECL (c));
remove = true;
break;
}
if (offset2)
o2 = wi::to_offset (offset2);
else
o2 = 0;
if (bitpos2)
o2 = o2 + bitpos2 / BITS_PER_UNIT;
if (wi::ltu_p (o1, o2)
|| (wi::eq_p (o1, o2) && bitpos < bitpos2))
{
if (ptr)
scp = sc;
else
break;
}
}
if (remove)
break;
OMP_CLAUSE_SIZE (*osc)
= size_binop (PLUS_EXPR, OMP_CLAUSE_SIZE (*osc),
size_one_node);
if (ptr)
{
tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
tree cl = NULL_TREE;
enum gomp_map_kind mkind
= code == OMP_TARGET_EXIT_DATA
? GOMP_MAP_RELEASE : GOMP_MAP_ALLOC;
OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
OMP_CLAUSE_DECL (c2)
= unshare_expr (OMP_CLAUSE_DECL (c));
OMP_CLAUSE_CHAIN (c2) = scp ? *scp : *prev_list_p;
OMP_CLAUSE_SIZE (c2)
= TYPE_SIZE_UNIT (ptr_type_node);
cl = scp ? *prev_list_p : c2;
if (OMP_CLAUSE_CHAIN (*prev_list_p) != c)
{
tree c4 = OMP_CLAUSE_CHAIN (*prev_list_p);
tree c3
= build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
OMP_CLAUSE_SET_MAP_KIND (c3, mkind);
OMP_CLAUSE_DECL (c3)
= unshare_expr (OMP_CLAUSE_DECL (c4));
OMP_CLAUSE_SIZE (c3)
= TYPE_SIZE_UNIT (ptr_type_node);
OMP_CLAUSE_CHAIN (c3) = *prev_list_p;
if (!scp)
OMP_CLAUSE_CHAIN (c2) = c3;
else
cl = c3;
}
if (scp)
*scp = c2;
if (sc == prev_list_p)
{
*sc = cl;
prev_list_p = NULL;
}
else
{
*prev_list_p = OMP_CLAUSE_CHAIN (c);
list_p = prev_list_p;
prev_list_p = NULL;
OMP_CLAUSE_CHAIN (c) = *sc;
*sc = cl;
continue;
}
}
else if (*sc != c)
{
*list_p = OMP_CLAUSE_CHAIN (c);
OMP_CLAUSE_CHAIN (c) = *sc;
*sc = c;
continue;
}
}
}
if (!remove
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_POINTER
&& OMP_CLAUSE_CHAIN (c)
&& OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (c)) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
== GOMP_MAP_ALWAYS_POINTER))
prev_list_p = list_p;
break;
}
flags = GOVD_MAP | GOVD_EXPLICIT;
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_TO
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_TOFROM)
flags |= GOVD_MAP_ALWAYS_TO;
goto do_add;
case OMP_CLAUSE_DEPEND:
if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
{
tree deps = OMP_CLAUSE_DECL (c);
while (deps && TREE_CODE (deps) == TREE_LIST)
{
if (TREE_CODE (TREE_PURPOSE (deps)) == TRUNC_DIV_EXPR
&& DECL_P (TREE_OPERAND (TREE_PURPOSE (deps), 1)))
gimplify_expr (&TREE_OPERAND (TREE_PURPOSE (deps), 1),
pre_p, NULL, is_gimple_val, fb_rvalue);
deps = TREE_CHAIN (deps);
}
break;
}
else if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
break;
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPOUND_EXPR)
{
gimplify_expr (&TREE_OPERAND (OMP_CLAUSE_DECL (c), 0), pre_p,
NULL, is_gimple_val, fb_rvalue);
OMP_CLAUSE_DECL (c) = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
}
if (error_operand_p (OMP_CLAUSE_DECL (c)))
{
remove = true;
break;
}
OMP_CLAUSE_DECL (c) = build_fold_addr_expr (OMP_CLAUSE_DECL (c));
if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
break;
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE__CACHE_:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
OMP_CLAUSE_SIZE (c) = DECL_P (decl) ? DECL_SIZE_UNIT (decl)
: TYPE_SIZE_UNIT (TREE_TYPE (decl));
if (gimplify_expr (&OMP_CLAUSE_SIZE (c), pre_p,
NULL, is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
if (!DECL_P (decl))
{
if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p,
NULL, is_gimple_lvalue, fb_lvalue)
== GS_ERROR)
{
remove = true;
break;
}
break;
}
goto do_notice;
case OMP_CLAUSE_USE_DEVICE_PTR:
flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
goto do_add;
case OMP_CLAUSE_IS_DEVICE_PTR:
flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
goto do_add;
do_add:
decl = OMP_CLAUSE_DECL (c);
do_add_decl:
if (error_operand_p (decl))
{
remove = true;
break;
}
if (DECL_NAME (decl) == NULL_TREE && (flags & GOVD_SHARED) == 0)
{
tree t = omp_member_access_dummy_var (decl);
if (t)
{
tree v = DECL_VALUE_EXPR (decl);
DECL_NAME (decl) = DECL_NAME (TREE_OPERAND (v, 1));
if (outer_ctx)
omp_notice_variable (outer_ctx, t, true);
}
}
if (code == OACC_DATA
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
flags |= GOVD_MAP_0LEN_ARRAY;
omp_add_variable (ctx, decl, flags);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
omp_add_variable (ctx, OMP_CLAUSE_REDUCTION_PLACEHOLDER (c),
GOVD_LOCAL | GOVD_SEEN);
if (OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
&& walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c),
find_decl_expr,
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c),
NULL) == NULL_TREE)
omp_add_variable (ctx,
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c),
GOVD_LOCAL | GOVD_SEEN);
gimplify_omp_ctxp = ctx;
push_gimplify_context ();
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c),
&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c)));
push_gimplify_context ();
gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c),
&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c)));
OMP_CLAUSE_REDUCTION_INIT (c) = NULL_TREE;
OMP_CLAUSE_REDUCTION_MERGE (c) = NULL_TREE;
gimplify_omp_ctxp = outer_ctx;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_STMT (c))
{
gimplify_omp_ctxp = ctx;
push_gimplify_context ();
if (TREE_CODE (OMP_CLAUSE_LASTPRIVATE_STMT (c)) != BIND_EXPR)
{
tree bind = build3 (BIND_EXPR, void_type_node, NULL,
NULL, NULL);
TREE_SIDE_EFFECTS (bind) = 1;
BIND_EXPR_BODY (bind) = OMP_CLAUSE_LASTPRIVATE_STMT (c);
OMP_CLAUSE_LASTPRIVATE_STMT (c) = bind;
}
gimplify_and_add (OMP_CLAUSE_LASTPRIVATE_STMT (c),
&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c)));
OMP_CLAUSE_LASTPRIVATE_STMT (c) = NULL_TREE;
gimplify_omp_ctxp = outer_ctx;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_STMT (c))
{
gimplify_omp_ctxp = ctx;
push_gimplify_context ();
if (TREE_CODE (OMP_CLAUSE_LINEAR_STMT (c)) != BIND_EXPR)
{
tree bind = build3 (BIND_EXPR, void_type_node, NULL,
NULL, NULL);
TREE_SIDE_EFFECTS (bind) = 1;
BIND_EXPR_BODY (bind) = OMP_CLAUSE_LINEAR_STMT (c);
OMP_CLAUSE_LINEAR_STMT (c) = bind;
}
gimplify_and_add (OMP_CLAUSE_LINEAR_STMT (c),
&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c)));
OMP_CLAUSE_LINEAR_STMT (c) = NULL_TREE;
gimplify_omp_ctxp = outer_ctx;
}
if (notice_outer)
goto do_notice;
break;
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_COPYPRIVATE:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_COPYPRIVATE
&& !remove
&& !omp_check_private (ctx, decl, true))
{
remove = true;
if (is_global_var (decl))
{
if (DECL_THREAD_LOCAL_P (decl))
remove = false;
else if (DECL_HAS_VALUE_EXPR_P (decl))
{
tree value = get_base_address (DECL_VALUE_EXPR (decl));
if (value
&& DECL_P (value)
&& DECL_THREAD_LOCAL_P (value))
remove = false;
}
}
if (remove)
error_at (OMP_CLAUSE_LOCATION (c),
"copyprivate variable %qE is not threadprivate"
" or private in outer context", DECL_NAME (decl));
}
do_notice:
if (outer_ctx)
omp_notice_variable (outer_ctx, decl, true);
if (check_non_private
&& region_type == ORT_WORKSHARE
&& (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
|| decl == OMP_CLAUSE_DECL (c)
|| (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF
&& (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0))
== ADDR_EXPR
|| (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0))
== POINTER_PLUS_EXPR
&& (TREE_CODE (TREE_OPERAND (TREE_OPERAND
(OMP_CLAUSE_DECL (c), 0), 0))
== ADDR_EXPR)))))
&& omp_check_private (ctx, decl, false))
{
error ("%s variable %qE is private in outer context",
check_non_private, DECL_NAME (decl));
remove = true;
}
break;
case OMP_CLAUSE_IF:
if (OMP_CLAUSE_IF_MODIFIER (c) != ERROR_MARK
&& OMP_CLAUSE_IF_MODIFIER (c) != code)
{
const char *p[2];
for (int i = 0; i < 2; i++)
switch (i ? OMP_CLAUSE_IF_MODIFIER (c) : code)
{
case OMP_PARALLEL: p[i] = "parallel"; break;
case OMP_TASK: p[i] = "task"; break;
case OMP_TASKLOOP: p[i] = "taskloop"; break;
case OMP_TARGET_DATA: p[i] = "target data"; break;
case OMP_TARGET: p[i] = "target"; break;
case OMP_TARGET_UPDATE: p[i] = "target update"; break;
case OMP_TARGET_ENTER_DATA:
p[i] = "target enter data"; break;
case OMP_TARGET_EXIT_DATA: p[i] = "target exit data"; break;
default: gcc_unreachable ();
}
error_at (OMP_CLAUSE_LOCATION (c),
"expected %qs %<if%> clause modifier rather than %qs",
p[0], p[1]);
remove = true;
}
/* Fall through. */
case OMP_CLAUSE_FINAL:
OMP_CLAUSE_OPERAND (c, 0)
= gimple_boolify (OMP_CLAUSE_OPERAND (c, 0));
/* Fall through. */
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_HINT:
case OMP_CLAUSE__CILK_FOR_COUNT_:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
remove = true;
break;
case OMP_CLAUSE_GANG:
if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
remove = true;
if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 1), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
remove = true;
break;
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
break;
case OMP_CLAUSE_DEFAULTMAP:
ctx->target_map_scalars_firstprivate = false;
break;
case OMP_CLAUSE_ALIGNED:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
if (gimplify_expr (&OMP_CLAUSE_ALIGNED_ALIGNMENT (c), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
if (!is_global_var (decl)
&& TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
omp_add_variable (ctx, decl, GOVD_ALIGNED);
break;
case OMP_CLAUSE_DEFAULT:
ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
break;
default:
gcc_unreachable ();
}
if (code == OACC_DATA
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
remove = true;
if (remove)
*list_p = OMP_CLAUSE_CHAIN (c);
else
list_p = &OMP_CLAUSE_CHAIN (c);
}
gimplify_omp_ctxp = ctx;
if (struct_map_to_clause)
delete struct_map_to_clause;
}
/* Return true if DECL is a candidate for shared to firstprivate
optimization. We only consider non-addressable scalars, not
too big, and not references. */
static bool
omp_shared_to_firstprivate_optimizable_decl_p (tree decl)
{
  /* An address-taken variable may be accessed through aliases, so it
     cannot safely be turned into a private copy.  */
  if (TREE_ADDRESSABLE (decl))
    return false;
  tree decl_type = TREE_TYPE (decl);
  /* Only plain register-type scalars qualify: no addressable types,
     no references, nothing that cannot live in a gimple register.  */
  if (TREE_ADDRESSABLE (decl_type)
      || TREE_CODE (decl_type) == REFERENCE_TYPE
      || !is_gimple_reg_type (decl_type))
    return false;
  /* Reject variably sized decls (-1) and anything larger than four
     pointers, since each thread/task would carry its own copy.  */
  const HOST_WIDE_INT size_bytes = int_size_in_bytes (decl_type);
  if (size_bytes == -1 || size_bytes > 4 * POINTER_SIZE / BITS_PER_UNIT)
    return false;
  /* Finally, the frontend must not privatize this decl by reference.  */
  return !lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Helper function of omp_find_stores_op and gimplify_adjust_omp_clauses*.
For omp_shared_to_firstprivate_optimizable_decl_p decl mark it as
GOVD_WRITTEN in outer contexts. */
static void
omp_mark_stores (struct gimplify_omp_ctx *ctx, tree decl)
{
  /* Walk outwards through the enclosing gimplify contexts looking for
     the innermost one that classifies DECL.  */
  while (ctx)
    {
      splay_tree_node n
	= splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
      if (n != NULL)
	{
	  if (n->value & GOVD_SHARED)
	    {
	      /* DECL is shared here: record that it is written so the
		 shared-to-firstprivate optimization is disabled.  */
	      n->value |= GOVD_WRITTEN;
	      return;
	    }
	  /* Any other data-sharing class shields outer contexts.  */
	  if (n->value & GOVD_DATA_SHARE_CLASS)
	    return;
	}
      ctx = ctx->outer_context;
    }
}
/* Helper callback for walk_gimple_seq to discover possible stores
to omp_shared_to_firstprivate_optimizable_decl_p decls and set
GOVD_WRITTEN if they are GOVD_SHARED in some outer context
for those. */
static tree
omp_find_stores_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  /* Operands are scanned individually; never recurse into subtrees.  */
  *walk_subtrees = 0;
  /* Only left-hand sides can be stores.  */
  if (!wi->is_lhs)
    return NULL_TREE;
  /* Peel component references and (TARGET_)MEM_REFs of &x down to the
     underlying base object.  */
  tree base = *tp;
  while (1)
    {
      if (handled_component_p (base))
	base = TREE_OPERAND (base, 0);
      else if ((TREE_CODE (base) == MEM_REF
		|| TREE_CODE (base) == TARGET_MEM_REF)
	       && TREE_CODE (TREE_OPERAND (base, 0)) == ADDR_EXPR)
	base = TREE_OPERAND (TREE_OPERAND (base, 0), 0);
      else
	break;
    }
  /* Record the store when the base is a decl eligible for the
     shared-to-firstprivate optimization.  */
  if (DECL_P (base) && omp_shared_to_firstprivate_optimizable_decl_p (base))
    omp_mark_stores (gimplify_omp_ctxp, base);
  return NULL_TREE;
}
/* Helper callback for walk_gimple_seq to discover possible stores
to omp_shared_to_firstprivate_optimizable_decl_p decls and set
GOVD_WRITTEN if they are GOVD_SHARED in some outer context
for those. */
static tree
omp_find_stores_stmt (gimple_stmt_iterator *gsi_p,
		      bool *handled_ops_p,
		      struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi_p);
  const enum gimple_code gcode = gimple_code (stmt);
  if (gcode == GIMPLE_OMP_FOR)
    {
      /* gimplify_adjust_omp_clauses already handled the loop body, but
	 the pre-body still needs to be scanned for stores.  */
      *handled_ops_p = true;
      gimple_seq pre_body = gimple_omp_for_pre_body (stmt);
      if (pre_body)
	walk_gimple_seq (pre_body, omp_find_stores_stmt,
			 omp_find_stores_op, wi);
    }
  else if (gcode == GIMPLE_OMP_PARALLEL
	   || gcode == GIMPLE_OMP_TASK
	   || gcode == GIMPLE_OMP_SECTIONS
	   || gcode == GIMPLE_OMP_SINGLE
	   || gcode == GIMPLE_OMP_TARGET
	   || gcode == GIMPLE_OMP_TEAMS
	   || gcode == GIMPLE_OMP_CRITICAL)
    /* Bodies of these OpenMP constructs were handled already; do not
       recurse into them.  */
    *handled_ops_p = true;
  return NULL_TREE;
}
/* Out-of-band arguments threaded through the splay-tree traversal into
   gimplify_adjust_omp_clauses_1 via its void *DATA parameter.  */
struct gimplify_adjust_omp_clauses_data
{
  /* Head of the clause chain that newly built implicit clauses are
     prepended to.  */
  tree *list_p;
  /* Sequence receiving statements emitted while gimplifying clause
     operands (e.g. sizes).  */
  gimple_seq *pre_p;
};
/* For all variables that were not actually used within the context,
remove PRIVATE, SHARED, and FIRSTPRIVATE clauses. */
static int
gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
{
  /* Splay-tree callback: N maps a decl to its GOVD_* data-sharing flags;
     build the corresponding implicit clause (if any) and prepend it to
     DATA->list_p.  Always returns 0 so the traversal continues.  */
  tree *list_p = ((struct gimplify_adjust_omp_clauses_data *) data)->list_p;
  gimple_seq *pre_p
    = ((struct gimplify_adjust_omp_clauses_data *) data)->pre_p;
  tree decl = (tree) n->key;
  unsigned flags = n->value;
  enum omp_clause_code code;
  tree clause;
  bool private_debug;
  /* Explicit clauses already exist on the list; GOVD_LOCAL decls need
     no clause at all.  */
  if (flags & (GOVD_EXPLICIT | GOVD_LOCAL))
    return 0;
  /* Unused variables get no implicit clause.  */
  if ((flags & GOVD_SEEN) == 0)
    return 0;
  if (flags & GOVD_DEBUG_PRIVATE)
    {
      gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_SHARED);
      private_debug = true;
    }
  else if (flags & GOVD_MAP)
    private_debug = false;
  else
    /* Let the frontend decide whether to emit a debug-only private
       clause for an otherwise shared variable.  */
    private_debug
      = lang_hooks.decls.omp_private_debug_clause (decl,
						   !!(flags & GOVD_SHARED));
  /* Translate the GOVD_* flags into an OMP clause code.  */
  if (private_debug)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_MAP)
    {
      code = OMP_CLAUSE_MAP;
      /* _Atomic types cannot be implicitly mapped on OpenMP (non-ACC)
	 target regions.  */
      if ((gimplify_omp_ctxp->region_type & ORT_ACC) == 0
	  && TYPE_ATOMIC (strip_array_types (TREE_TYPE (decl))))
	{
	  error ("%<_Atomic%> %qD in implicit %<map%> clause", decl);
	  return 0;
	}
    }
  else if (flags & GOVD_SHARED)
    {
      if (is_global_var (decl))
	{
	  /* A global is implicitly shared anyway; only emit the clause
	     if some enclosing context privatizes/maps it.  */
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
	  while (ctx != NULL)
	    {
	      splay_tree_node on
		= splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	      if (on && (on->value & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
				      | GOVD_PRIVATE | GOVD_REDUCTION
				      | GOVD_LINEAR | GOVD_MAP)) != 0)
		break;
	      ctx = ctx->outer_context;
	    }
	  if (ctx == NULL)
	    return 0;
	}
      code = OMP_CLAUSE_SHARED;
    }
  else if (flags & GOVD_PRIVATE)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_FIRSTPRIVATE)
    {
      code = OMP_CLAUSE_FIRSTPRIVATE;
      /* _Atomic types cannot be implicitly firstprivatized on OpenMP
	 target constructs.  */
      if ((gimplify_omp_ctxp->region_type & ORT_TARGET)
	  && (gimplify_omp_ctxp->region_type & ORT_ACC) == 0
	  && TYPE_ATOMIC (strip_array_types (TREE_TYPE (decl))))
	{
	  error ("%<_Atomic%> %qD in implicit %<firstprivate%> clause on "
		 "%<target%> construct", decl);
	  return 0;
	}
    }
  else if (flags & GOVD_LASTPRIVATE)
    code = OMP_CLAUSE_LASTPRIVATE;
  else if (flags & GOVD_ALIGNED)
    /* GOVD_ALIGNED alone never produces an implicit clause.  */
    return 0;
  else
    gcc_unreachable ();
  /* Lastprivate and written-shared decls invalidate the
     shared-to-firstprivate optimization in outer contexts too.  */
  if (((flags & GOVD_LASTPRIVATE)
       || (code == OMP_CLAUSE_SHARED && (flags & GOVD_WRITTEN)))
      && omp_shared_to_firstprivate_optimizable_decl_p (decl))
    omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);
  /* Build the clause and prepend it (plus any companions below) to the
     existing chain.  */
  tree chain = *list_p;
  clause = build_omp_clause (input_location, code);
  OMP_CLAUSE_DECL (clause) = decl;
  OMP_CLAUSE_CHAIN (clause) = chain;
  if (private_debug)
    OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
  else if (code == OMP_CLAUSE_PRIVATE && (flags & GOVD_PRIVATE_OUTER_REF))
    OMP_CLAUSE_PRIVATE_OUTER_REF (clause) = 1;
  else if (code == OMP_CLAUSE_SHARED
	   && (flags & GOVD_WRITTEN) == 0
	   && omp_shared_to_firstprivate_optimizable_decl_p (decl))
    /* Never-written shared scalar: candidate for the firstprivate
       optimization in omp-low.  */
    OMP_CLAUSE_SHARED_READONLY (clause) = 1;
  else if (code == OMP_CLAUSE_FIRSTPRIVATE && (flags & GOVD_EXPLICIT) == 0)
    OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (clause) = 1;
  else if (code == OMP_CLAUSE_MAP && (flags & GOVD_MAP_0LEN_ARRAY) != 0)
    {
      /* Maybe-zero-length array section: emit a zero-sized ALLOC map of
	 *decl plus a FIRSTPRIVATE_POINTER map of the pointer itself.  */
      tree nc = build_omp_clause (input_location, OMP_CLAUSE_MAP);
      OMP_CLAUSE_DECL (nc) = decl;
      if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE
	  && TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == POINTER_TYPE)
	OMP_CLAUSE_DECL (clause)
	  = build_simple_mem_ref_loc (input_location, decl);
      OMP_CLAUSE_DECL (clause)
	= build2 (MEM_REF, char_type_node, OMP_CLAUSE_DECL (clause),
		  build_int_cst (build_pointer_type (char_type_node), 0));
      OMP_CLAUSE_SIZE (clause) = size_zero_node;
      OMP_CLAUSE_SIZE (nc) = size_zero_node;
      OMP_CLAUSE_SET_MAP_KIND (clause, GOMP_MAP_ALLOC);
      OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (clause) = 1;
      OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_FIRSTPRIVATE_POINTER);
      OMP_CLAUSE_CHAIN (nc) = chain;
      OMP_CLAUSE_CHAIN (clause) = nc;
      /* Gimplify the pointer in the enclosing context, not this one.  */
      struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
      gimplify_omp_ctxp = ctx->outer_context;
      gimplify_expr (&TREE_OPERAND (OMP_CLAUSE_DECL (clause), 0),
		     pre_p, NULL, is_gimple_val, fb_rvalue);
      gimplify_omp_ctxp = ctx;
    }
  else if (code == OMP_CLAUSE_MAP)
    {
      int kind = (flags & GOVD_MAP_TO_ONLY
		  ? GOMP_MAP_TO
		  : GOMP_MAP_TOFROM);
      if (flags & GOVD_MAP_FORCE)
	kind |= GOMP_MAP_FLAG_FORCE;
      OMP_CLAUSE_SET_MAP_KIND (clause, kind);
      if (DECL_SIZE (decl)
	  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	{
	  /* Variable-sized decl: map the pointed-to storage through its
	     DECL_VALUE_EXPR and add a companion pointer map.  */
	  tree decl2 = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
	  decl2 = TREE_OPERAND (decl2, 0);
	  gcc_assert (DECL_P (decl2));
	  tree mem = build_simple_mem_ref (decl2);
	  OMP_CLAUSE_DECL (clause) = mem;
	  OMP_CLAUSE_SIZE (clause) = TYPE_SIZE_UNIT (TREE_TYPE (decl));
	  if (gimplify_omp_ctxp->outer_context)
	    {
	      struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
	      omp_notice_variable (ctx, decl2, true);
	      omp_notice_variable (ctx, OMP_CLAUSE_SIZE (clause), true);
	    }
	  tree nc = build_omp_clause (OMP_CLAUSE_LOCATION (clause),
				      OMP_CLAUSE_MAP);
	  OMP_CLAUSE_DECL (nc) = decl;
	  OMP_CLAUSE_SIZE (nc) = size_zero_node;
	  if (gimplify_omp_ctxp->target_firstprivatize_array_bases)
	    OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_FIRSTPRIVATE_POINTER);
	  else
	    OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_POINTER);
	  OMP_CLAUSE_CHAIN (nc) = OMP_CLAUSE_CHAIN (clause);
	  OMP_CLAUSE_CHAIN (clause) = nc;
	}
      else if (gimplify_omp_ctxp->target_firstprivatize_array_bases
	       && lang_hooks.decls.omp_privatize_by_reference (decl))
	{
	  /* Reference-privatized decl: map the referenced object and add
	     a FIRSTPRIVATE_REFERENCE companion for the reference.  */
	  OMP_CLAUSE_DECL (clause) = build_simple_mem_ref (decl);
	  OMP_CLAUSE_SIZE (clause)
	    = unshare_expr (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))));
	  /* Gimplify the size in the enclosing context.  */
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
	  gimplify_omp_ctxp = ctx->outer_context;
	  gimplify_expr (&OMP_CLAUSE_SIZE (clause),
			 pre_p, NULL, is_gimple_val, fb_rvalue);
	  gimplify_omp_ctxp = ctx;
	  tree nc = build_omp_clause (OMP_CLAUSE_LOCATION (clause),
				      OMP_CLAUSE_MAP);
	  OMP_CLAUSE_DECL (nc) = decl;
	  OMP_CLAUSE_SIZE (nc) = size_zero_node;
	  OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_FIRSTPRIVATE_REFERENCE);
	  OMP_CLAUSE_CHAIN (nc) = OMP_CLAUSE_CHAIN (clause);
	  OMP_CLAUSE_CHAIN (clause) = nc;
	}
      else
	OMP_CLAUSE_SIZE (clause) = DECL_SIZE_UNIT (decl);
    }
  if (code == OMP_CLAUSE_FIRSTPRIVATE && (flags & GOVD_LASTPRIVATE) != 0)
    {
      /* Combined firstprivate+lastprivate: add the lastprivate clause
	 (marked as also-firstprivate) after the firstprivate one.  */
      tree nc = build_omp_clause (input_location, OMP_CLAUSE_LASTPRIVATE);
      OMP_CLAUSE_DECL (nc) = decl;
      OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (nc) = 1;
      OMP_CLAUSE_CHAIN (nc) = chain;
      OMP_CLAUSE_CHAIN (clause) = nc;
      struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
      gimplify_omp_ctxp = ctx->outer_context;
      lang_hooks.decls.omp_finish_clause (nc, pre_p);
      gimplify_omp_ctxp = ctx;
    }
  *list_p = clause;
  /* Let the frontend finalize the clause with the enclosing context
     current, then notice any DECL-valued map sizes of the clauses just
     added (everything from CLAUSE up to the old CHAIN head).  */
  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
  gimplify_omp_ctxp = ctx->outer_context;
  lang_hooks.decls.omp_finish_clause (clause, pre_p);
  if (gimplify_omp_ctxp)
    for (; clause != chain; clause = OMP_CLAUSE_CHAIN (clause))
      if (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_MAP
	  && DECL_P (OMP_CLAUSE_SIZE (clause)))
	omp_notice_variable (gimplify_omp_ctxp, OMP_CLAUSE_SIZE (clause),
			     true);
  gimplify_omp_ctxp = ctx;
  return 0;
}
/* Post-gimplification fixup of the clause list *LIST_P for a construct of
   tree code CODE (e.g. OMP_PARALLEL, OMP_TARGET_EXIT_DATA, OACC_CACHE),
   run after the construct's BODY (may be NULL) has been gimplified.
   Removes clauses for variables never actually seen in the region, rewrites
   map/to/from clauses for variably-sized decls to go through their
   DECL_VALUE_EXPR pointer, diagnoses invalid combinations, then appends
   implicit data-sharing clauses from the current gimplify context and pops
   that context.  PRE_P receives any statements generated along the way.  */
static void
gimplify_adjust_omp_clauses (gimple_seq *pre_p, gimple_seq body, tree *list_p,
			     enum tree_code code)
{
  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
  tree c, decl;

  if (body)
    {
      /* If this region is nested in a parallel/task/teams region, walk the
	 gimplified body to record which variables are written (stores), so
	 the shared->firstprivate optimization below has accurate data.  */
      struct gimplify_omp_ctx *octx;
      for (octx = ctx; octx; octx = octx->outer_context)
	if ((octx->region_type & (ORT_PARALLEL | ORT_TASK | ORT_TEAMS)) != 0)
	  break;
      if (octx)
	{
	  struct walk_stmt_info wi;
	  memset (&wi, 0, sizeof (wi));
	  walk_gimple_seq (body, omp_find_stores_stmt,
			   omp_find_stores_op, &wi);
	}
    }
  /* Iterate over the explicit clauses; REMOVE decides whether the current
     clause C is unlinked from the list at the bottom of the loop.  */
  while ((c = *list_p) != NULL)
    {
      splay_tree_node n;
      bool remove = false;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_FIRSTPRIVATE:
	  /* _Atomic variables may not be firstprivatized on (non-OpenACC)
	     target constructs; diagnose and drop the clause.  */
	  if ((ctx->region_type & ORT_TARGET)
	      && (ctx->region_type & ORT_ACC) == 0
	      && TYPE_ATOMIC (strip_array_types
			      (TREE_TYPE (OMP_CLAUSE_DECL (c)))))
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"%<_Atomic%> %qD in %<firstprivate%> clause on "
			"%<target%> construct", OMP_CLAUSE_DECL (c));
	      remove = true;
	      break;
	    }
	  /* FALLTHRU */
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  /* Drop the clause if the variable was never referenced in the
	     region.  */
	  remove = !(n->value & GOVD_SEEN);
	  if (! remove)
	    {
	      bool shared = OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED;
	      if ((n->value & GOVD_DEBUG_PRIVATE)
		  || lang_hooks.decls.omp_private_debug_clause (decl, shared))
		{
		  /* Turn the clause into PRIVATE with the DEBUG flag so the
		     frontend's debug-privatization request is honored.  */
		  gcc_assert ((n->value & GOVD_DEBUG_PRIVATE) == 0
			      || ((n->value & GOVD_DATA_SHARE_CLASS)
				  == GOVD_SHARED));
		  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_PRIVATE);
		  OMP_CLAUSE_PRIVATE_DEBUG (c) = 1;
		}
	      /* A shared variable that is never written in the region can be
		 marked read-only, enabling shared->firstprivate
		 optimization; otherwise propagate the store information
		 outward.  */
	      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		  && (n->value & GOVD_WRITTEN) == 0
		  && DECL_P (decl)
		  && omp_shared_to_firstprivate_optimizable_decl_p (decl))
		OMP_CLAUSE_SHARED_READONLY (c) = 1;
	      else if (DECL_P (decl)
		       && ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
			    && (n->value & GOVD_WRITTEN) != 1)
			   || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
			       && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
		       && omp_shared_to_firstprivate_optimizable_decl_p (decl))
		omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);
	    }
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Make sure OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE is set to
	     accurately reflect the presence of a FIRSTPRIVATE clause.  */
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
	    = (n->value & GOVD_FIRSTPRIVATE) != 0;
	  /* firstprivate + lastprivate of the same variable is invalid on
	     distribute.  */
	  if (code == OMP_DISTRIBUTE
	      && OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    {
	      remove = true;
	      error_at (OMP_CLAUSE_LOCATION (c),
			"same variable used in %<firstprivate%> and "
			"%<lastprivate%> clauses on %<distribute%> "
			"construct");
	    }
	  if (!remove
	      && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	      && DECL_P (decl)
	      && omp_shared_to_firstprivate_optimizable_decl_p (decl))
	    omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);
	  break;

	case OMP_CLAUSE_ALIGNED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_global_var (decl))
	    {
	      n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	      remove = n == NULL || !(n->value & GOVD_SEEN);
	      if (!remove && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
		{
		  struct gimplify_omp_ctx *octx;
		  if (n != NULL
		      && (n->value & (GOVD_DATA_SHARE_CLASS
				      & ~GOVD_FIRSTPRIVATE)))
		    remove = true;
		  else
		    for (octx = ctx->outer_context; octx;
			 octx = octx->outer_context)
		      {
			n = splay_tree_lookup (octx->variables,
					       (splay_tree_key) decl);
			if (n == NULL)
			  continue;
			if (n->value & GOVD_LOCAL)
			  break;
			/* We have to avoid assigning a shared variable
			   to itself when trying to add
			   __builtin_assume_aligned.  */
			if (n->value & GOVD_SHARED)
			  {
			    remove = true;
			    break;
			  }
		      }
		}
	    }
	  else if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    {
	      n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	      if (n != NULL && (n->value & GOVD_DATA_SHARE_CLASS) != 0)
		remove = true;
	    }
	  break;

	case OMP_CLAUSE_MAP:
	  /* ALWAYS_POINTER maps make no sense on target exit data.  */
	  if (code == OMP_TARGET_EXIT_DATA
	      && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER)
	    {
	      remove = true;
	      break;
	    }
	  decl = OMP_CLAUSE_DECL (c);
	  /* Data clauses associated with acc parallel reductions must be
	     compatible with present_or_copy.  Warn and adjust the clause
	     if that is not the case.  */
	  if (ctx->region_type == ORT_ACC_PARALLEL)
	    {
	      tree t = DECL_P (decl) ? decl : TREE_OPERAND (decl, 0);
	      n = NULL;
	      if (DECL_P (t))
		n = splay_tree_lookup (ctx->variables, (splay_tree_key) t);
	      if (n && (n->value & GOVD_REDUCTION))
		{
		  enum gomp_map_kind kind = OMP_CLAUSE_MAP_KIND (c);

		  OMP_CLAUSE_MAP_IN_REDUCTION (c) = 1;
		  if ((kind & GOMP_MAP_TOFROM) != GOMP_MAP_TOFROM
		      && kind != GOMP_MAP_FORCE_PRESENT
		      && kind != GOMP_MAP_POINTER)
		    {
		      warning_at (OMP_CLAUSE_LOCATION (c), 0,
				  "incompatible data clause with reduction "
				  "on %qE; promoting to present_or_copy",
				  DECL_NAME (t));
		      OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_TOFROM);
		    }
		}
	    }
	  if (!DECL_P (decl))
	    {
	      /* For firstprivatized pointer components of a struct, drop
		 the clause if the whole struct was never referenced.  */
	      if ((ctx->region_type & ORT_TARGET) != 0
		  && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
		{
		  if (TREE_CODE (decl) == INDIRECT_REF
		      && TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
		      && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
			  == REFERENCE_TYPE))
		    decl = TREE_OPERAND (decl, 0);
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    {
		      while (TREE_CODE (decl) == COMPONENT_REF)
			decl = TREE_OPERAND (decl, 0);
		      if (DECL_P (decl))
			{
			  n = splay_tree_lookup (ctx->variables,
						 (splay_tree_key) decl);
			  if (!(n->value & GOVD_SEEN))
			    remove = true;
			}
		    }
		}
	      break;
	    }
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  if ((ctx->region_type & ORT_TARGET) != 0
	      && !(n->value & GOVD_SEEN)
	      && GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c)) == 0
	      && (!is_global_var (decl)
		  || !lookup_attribute ("omp declare target link",
					DECL_ATTRIBUTES (decl))))
	    {
	      remove = true;
	      /* For struct element mapping, if struct is never referenced
		 in target block and none of the mapping has always modifier,
		 remove all the struct element mappings, which immediately
		 follow the GOMP_MAP_STRUCT map clause.  */
	      if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_STRUCT)
		{
		  HOST_WIDE_INT cnt = tree_to_shwi (OMP_CLAUSE_SIZE (c));
		  while (cnt--)
		    OMP_CLAUSE_CHAIN (c)
		      = OMP_CLAUSE_CHAIN (OMP_CLAUSE_CHAIN (c));
		}
	    }
	  else if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_STRUCT
		   && code == OMP_TARGET_EXIT_DATA)
	    remove = true;
	  else if (DECL_SIZE (decl)
		   && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST
		   && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_POINTER
		   && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
		   && (OMP_CLAUSE_MAP_KIND (c)
		       != GOMP_MAP_FIRSTPRIVATE_REFERENCE))
	    {
	      /* For GOMP_MAP_FORCE_DEVICEPTR, we'll never enter here, because
		 for these, TREE_CODE (DECL_SIZE (decl)) will always be
		 INTEGER_CST.  */
	      gcc_assert (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FORCE_DEVICEPTR);

	      /* Variable-sized decl: map the storage it points at (via its
		 DECL_VALUE_EXPR) with the dynamic size, then append a
		 pointer clause for the decl itself.  */
	      tree decl2 = DECL_VALUE_EXPR (decl);
	      gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
	      decl2 = TREE_OPERAND (decl2, 0);
	      gcc_assert (DECL_P (decl2));
	      tree mem = build_simple_mem_ref (decl2);
	      OMP_CLAUSE_DECL (c) = mem;
	      OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (decl));
	      if (ctx->outer_context)
		{
		  omp_notice_variable (ctx->outer_context, decl2, true);
		  omp_notice_variable (ctx->outer_context,
				       OMP_CLAUSE_SIZE (c), true);
		}
	      if (((ctx->region_type & ORT_TARGET) != 0
		   || !ctx->target_firstprivatize_array_bases)
		  && ((n->value & GOVD_SEEN) == 0
		      || (n->value & (GOVD_PRIVATE | GOVD_FIRSTPRIVATE)) == 0))
		{
		  tree nc = build_omp_clause (OMP_CLAUSE_LOCATION (c),
					      OMP_CLAUSE_MAP);
		  OMP_CLAUSE_DECL (nc) = decl;
		  OMP_CLAUSE_SIZE (nc) = size_zero_node;
		  if (ctx->target_firstprivatize_array_bases)
		    OMP_CLAUSE_SET_MAP_KIND (nc,
					     GOMP_MAP_FIRSTPRIVATE_POINTER);
		  else
		    OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_POINTER);
		  OMP_CLAUSE_CHAIN (nc) = OMP_CLAUSE_CHAIN (c);
		  OMP_CLAUSE_CHAIN (c) = nc;
		  /* Skip over the just-inserted clause.  */
		  c = nc;
		}
	    }
	  else
	    {
	      if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
		OMP_CLAUSE_SIZE (c) = DECL_SIZE_UNIT (decl);
	      gcc_assert ((n->value & GOVD_SEEN) == 0
			  || ((n->value & (GOVD_PRIVATE | GOVD_FIRSTPRIVATE))
			      == 0));
	    }
	  break;

	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE__CACHE_:
	  decl = OMP_CLAUSE_DECL (c);
	  if (!DECL_P (decl))
	    break;
	  /* Same variably-sized-decl rewriting as for map clauses above,
	     minus the extra pointer clause.  */
	  if (DECL_SIZE (decl)
	      && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	    {
	      tree decl2 = DECL_VALUE_EXPR (decl);
	      gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
	      decl2 = TREE_OPERAND (decl2, 0);
	      gcc_assert (DECL_P (decl2));
	      tree mem = build_simple_mem_ref (decl2);
	      OMP_CLAUSE_DECL (c) = mem;
	      OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (decl));
	      if (ctx->outer_context)
		{
		  omp_notice_variable (ctx->outer_context, decl2, true);
		  omp_notice_variable (ctx->outer_context,
				       OMP_CLAUSE_SIZE (c), true);
		}
	    }
	  else if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
	    OMP_CLAUSE_SIZE (c) = DECL_SIZE_UNIT (decl);
	  break;

	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  /* OpenACC reductions need a present_or_copy data clause.
	     Add one if necessary.  Error is the reduction is private.  */
	  if (ctx->region_type == ORT_ACC_PARALLEL)
	    {
	      n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	      if (n->value & (GOVD_PRIVATE | GOVD_FIRSTPRIVATE))
		error_at (OMP_CLAUSE_LOCATION (c), "invalid private "
			  "reduction on %qE", DECL_NAME (decl));
	      else if ((n->value & GOVD_MAP) == 0)
		{
		  tree next = OMP_CLAUSE_CHAIN (c);
		  tree nc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_MAP);
		  OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_TOFROM);
		  OMP_CLAUSE_DECL (nc) = decl;
		  OMP_CLAUSE_CHAIN (c) = nc;
		  lang_hooks.decls.omp_finish_clause (nc, pre_p);
		  /* The language hook may have chained further clauses onto
		     NC; flag them all as in-reduction before re-linking.  */
		  while (1)
		    {
		      OMP_CLAUSE_MAP_IN_REDUCTION (nc) = 1;
		      if (OMP_CLAUSE_CHAIN (nc) == NULL)
			break;
		      nc = OMP_CLAUSE_CHAIN (nc);
		    }
		  OMP_CLAUSE_CHAIN (nc) = next;
		  n->value |= GOVD_MAP;
		}
	    }
	  if (DECL_P (decl)
	      && omp_shared_to_firstprivate_optimizable_decl_p (decl))
	    omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);
	  break;

	/* Clauses that need no post-processing here.  */
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE_PRIORITY:
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_THREADS:
	case OMP_CLAUSE_SIMD:
	case OMP_CLAUSE_HINT:
	case OMP_CLAUSE_DEFAULTMAP:
	case OMP_CLAUSE_USE_DEVICE_PTR:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	case OMP_CLAUSE__CILK_FOR_COUNT_:
	case OMP_CLAUSE_ASYNC:
	case OMP_CLAUSE_WAIT:
	case OMP_CLAUSE_INDEPENDENT:
	case OMP_CLAUSE_NUM_GANGS:
	case OMP_CLAUSE_NUM_WORKERS:
	case OMP_CLAUSE_VECTOR_LENGTH:
	case OMP_CLAUSE_GANG:
	case OMP_CLAUSE_WORKER:
	case OMP_CLAUSE_VECTOR:
	case OMP_CLAUSE_AUTO:
	case OMP_CLAUSE_SEQ:
	case OMP_CLAUSE_TILE:
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Unlink C or advance past it.  */
      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  /* Add in any implicit data sharing.  */
  struct gimplify_adjust_omp_clauses_data data;
  data.list_p = list_p;
  data.pre_p = pre_p;
  splay_tree_foreach (ctx->variables, gimplify_adjust_omp_clauses_1, &data);

  /* This context is done; restore the outer one and free it.  */
  gimplify_omp_ctxp = ctx->outer_context;
  delete_omp_context (ctx);
}
/* Gimplify an OACC_CACHE directive: scan and then post-process its clause
   list, and drop the directive itself from the statement stream.  */

static void
gimplify_oacc_cache (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  tree *clauses_p = &OACC_CACHE_CLAUSES (expr);

  gimplify_scan_omp_clauses (clauses_p, pre_p, ORT_ACC, OACC_CACHE);
  gimplify_adjust_omp_clauses (pre_p, NULL, clauses_p, OACC_CACHE);

  /* TODO: Do something sensible with this information.  */
  *expr_p = NULL_TREE;
}
/* Helper function of gimplify_oacc_declare.  The helper's purpose is to,
   if required, translate 'kind' in CLAUSE into an 'entry' kind and 'exit'
   kind.  The entry kind will replace the one in CLAUSE, while the exit
   kind will be used in a new omp_clause and returned to the caller.  */

static tree
gimplify_oacc_declare_1 (tree clause)
{
  HOST_WIDE_INT entry_kind, exit_kind;
  bool need_exit_clause = false;
  tree exit_clause = NULL;

  entry_kind = OMP_CLAUSE_MAP_KIND (clause);
  exit_kind = entry_kind;

  switch (entry_kind)
    {
    /* Allocation-only entries are simply deleted on exit; the entry kind
       in CLAUSE stays as written.  */
    case GOMP_MAP_ALLOC:
    case GOMP_MAP_FORCE_ALLOC:
    case GOMP_MAP_FORCE_TO:
      exit_kind = GOMP_MAP_DELETE;
      need_exit_clause = true;
      break;

    /* 'from' style maps: allocate on entry, copy back on exit.  */
    case GOMP_MAP_FORCE_FROM:
      OMP_CLAUSE_SET_MAP_KIND (clause, GOMP_MAP_FORCE_ALLOC);
      exit_kind = GOMP_MAP_FORCE_FROM;
      need_exit_clause = true;
      break;

    case GOMP_MAP_FROM:
      OMP_CLAUSE_SET_MAP_KIND (clause, GOMP_MAP_FORCE_ALLOC);
      exit_kind = GOMP_MAP_FROM;
      need_exit_clause = true;
      break;

    /* 'tofrom' maps split into copy-in on entry, copy-out on exit.  */
    case GOMP_MAP_FORCE_TOFROM:
      OMP_CLAUSE_SET_MAP_KIND (clause, GOMP_MAP_FORCE_TO);
      exit_kind = GOMP_MAP_FORCE_FROM;
      need_exit_clause = true;
      break;

    case GOMP_MAP_TOFROM:
      OMP_CLAUSE_SET_MAP_KIND (clause, GOMP_MAP_TO);
      exit_kind = GOMP_MAP_FROM;
      need_exit_clause = true;
      break;

    /* These kinds need no exit counterpart.  */
    case GOMP_MAP_DEVICE_RESIDENT:
    case GOMP_MAP_FORCE_DEVICEPTR:
    case GOMP_MAP_FORCE_PRESENT:
    case GOMP_MAP_LINK:
    case GOMP_MAP_POINTER:
    case GOMP_MAP_TO:
      break;

    default:
      gcc_unreachable ();
      break;
    }

  if (need_exit_clause)
    {
      exit_clause = build_omp_clause (OMP_CLAUSE_LOCATION (clause),
				      OMP_CLAUSE_MAP);
      OMP_CLAUSE_SET_MAP_KIND (exit_clause, exit_kind);
      OMP_CLAUSE_DECL (exit_clause) = OMP_CLAUSE_DECL (clause);
    }

  return exit_clause;
}
/* Gimplify OACC_DECLARE.  Scans and adjusts the directive's map clauses,
   tags the referenced variables with the "oacc declare target" attribute,
   records per-variable 'exit' clauses for function-local variables (so
   they can be unmapped on function return), and finally emits the whole
   thing as a GIMPLE_OMP_TARGET statement of kind OACC_DECLARE.  */

static void
gimplify_oacc_declare (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  gomp_target *stmt;
  tree clauses, t, decl;

  clauses = OACC_DECLARE_CLAUSES (expr);

  gimplify_scan_omp_clauses (&clauses, pre_p, ORT_TARGET_DATA, OACC_DECLARE);
  gimplify_adjust_omp_clauses (pre_p, NULL, &clauses, OACC_DECLARE);

  for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
    {
      decl = OMP_CLAUSE_DECL (t);

      /* Look through a MEM_REF to the underlying base decl.  */
      if (TREE_CODE (decl) == MEM_REF)
	decl = TREE_OPERAND (decl, 0);

      /* Mark the variable as declared for OpenACC, unless it already is.  */
      if (VAR_P (decl) && !is_oacc_declared (decl))
	{
	  tree attr = get_identifier ("oacc declare target");
	  DECL_ATTRIBUTES (decl) = tree_cons (attr, NULL_TREE,
					      DECL_ATTRIBUTES (decl));
	}

      /* For function-local variables, compute the matching 'exit' map
	 clause (if any) and stash it in the global oacc_declare_returns
	 map, keyed by the decl, for use at function exit.  */
      if (VAR_P (decl)
	  && !is_global_var (decl)
	  && DECL_CONTEXT (decl) == current_function_decl)
	{
	  tree c = gimplify_oacc_declare_1 (t);
	  if (c)
	    {
	      if (oacc_declare_returns == NULL)
		oacc_declare_returns = new hash_map<tree, tree>;

	      oacc_declare_returns->put (decl, c);
	    }
	}

      if (gimplify_omp_ctxp)
	omp_add_variable (gimplify_omp_ctxp, decl, GOVD_SEEN);
    }

  stmt = gimple_build_omp_target (NULL, GF_OMP_TARGET_KIND_OACC_DECLARE,
				  clauses);

  gimplify_seq_add_stmt (pre_p, stmt);

  *expr_p = NULL_TREE;
}
/* Gimplify the contents of an OMP_PARALLEL statement.  This involves
   gimplification of the body, as well as scanning the body for used
   variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */

static void
gimplify_omp_parallel (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  gimple_seq body = NULL;

  /* Scan the clauses first; this pushes a new gimplify-omp context.  */
  gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (expr), pre_p,
			     OMP_PARALLEL_COMBINED (expr)
			     ? ORT_COMBINED_PARALLEL
			     : ORT_PARALLEL, OMP_PARALLEL);

  push_gimplify_context ();

  gimple *stmt = gimplify_and_return_first (OMP_PARALLEL_BODY (expr), &body);
  /* Only a GIMPLE_BIND body is handed back to the popped context.  */
  pop_gimplify_context (gimple_code (stmt) == GIMPLE_BIND ? stmt : NULL);

  gimplify_adjust_omp_clauses (pre_p, body, &OMP_PARALLEL_CLAUSES (expr),
			       OMP_PARALLEL);

  stmt = gimple_build_omp_parallel (body,
				    OMP_PARALLEL_CLAUSES (expr),
				    NULL_TREE, NULL_TREE);
  if (OMP_PARALLEL_COMBINED (expr))
    gimple_omp_set_subcode (stmt, GF_OMP_PARALLEL_COMBINED);
  gimplify_seq_add_stmt (pre_p, stmt);
  *expr_p = NULL_TREE;
}
/* Gimplify the contents of an OMP_TASK statement.  This involves
   gimplification of the body, as well as scanning the body for used
   variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */

static void
gimplify_omp_task (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  gimple_seq body = NULL;

  /* An 'untied' clause selects the untied-task region type.  */
  enum omp_region_type ort
    = (omp_find_clause (OMP_TASK_CLAUSES (expr), OMP_CLAUSE_UNTIED)
       ? ORT_UNTIED_TASK : ORT_TASK);
  gimplify_scan_omp_clauses (&OMP_TASK_CLAUSES (expr), pre_p, ort, OMP_TASK);

  push_gimplify_context ();

  gimple *stmt = gimplify_and_return_first (OMP_TASK_BODY (expr), &body);
  /* Only a GIMPLE_BIND body is handed back to the popped context.  */
  pop_gimplify_context (gimple_code (stmt) == GIMPLE_BIND ? stmt : NULL);

  gimplify_adjust_omp_clauses (pre_p, body, &OMP_TASK_CLAUSES (expr),
			       OMP_TASK);

  stmt = gimple_build_omp_task (body,
				OMP_TASK_CLAUSES (expr),
				NULL_TREE, NULL_TREE,
				NULL_TREE, NULL_TREE, NULL_TREE);
  gimplify_seq_add_stmt (pre_p, stmt);
  *expr_p = NULL_TREE;
}
/* Helper function of gimplify_omp_for, find OMP_FOR resp. OMP_SIMD
   with non-NULL OMP_FOR_INIT.  Used as a walk_tree callback; returns
   the matching loop tree, or NULL_TREE to keep walking.  */

static tree
find_combined_omp_for (tree *tp, int *walk_subtrees, void *)
{
  /* By default do not descend into this node's children.  */
  *walk_subtrees = 0;

  tree t = *tp;
  enum tree_code tcode = TREE_CODE (t);

  if (tcode == OMP_FOR || tcode == OMP_SIMD)
    {
      /* An OMP_FOR may itself wrap the combined loop, so keep walking
	 beneath it; OMP_SIMD is always the innermost construct.  */
      if (tcode == OMP_FOR)
	*walk_subtrees = 1;
      if (OMP_FOR_INIT (t) != NULL_TREE)
	return t;
    }
  else if (tcode == BIND_EXPR
	   || tcode == STATEMENT_LIST
	   || tcode == OMP_PARALLEL)
    /* Transparent containers: look inside.  */
    *walk_subtrees = 1;

  return NULL_TREE;
}
/* Gimplify the gross structure of an OMP_FOR statement. */
static enum gimplify_status
gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
{
tree for_stmt, orig_for_stmt, inner_for_stmt = NULL_TREE, decl, var, t;
enum gimplify_status ret = GS_ALL_DONE;
enum gimplify_status tret;
gomp_for *gfor;
gimple_seq for_body, for_pre_body;
int i;
bitmap has_decl_expr = NULL;
enum omp_region_type ort = ORT_WORKSHARE;
orig_for_stmt = for_stmt = *expr_p;
switch (TREE_CODE (for_stmt))
{
case OMP_FOR:
case CILK_FOR:
case OMP_DISTRIBUTE:
break;
case OACC_LOOP:
ort = ORT_ACC;
break;
case OMP_TASKLOOP:
if (omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_UNTIED))
ort = ORT_UNTIED_TASK;
else
ort = ORT_TASK;
break;
case OMP_SIMD:
case CILK_SIMD:
ort = ORT_SIMD;
break;
default:
gcc_unreachable ();
}
/* Set OMP_CLAUSE_LINEAR_NO_COPYIN flag on explicit linear
clause for the IV. */
if (ort == ORT_SIMD && TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), 0);
gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
decl = TREE_OPERAND (t, 0);
for (tree c = OMP_FOR_CLAUSES (for_stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_DECL (c) == decl)
{
OMP_CLAUSE_LINEAR_NO_COPYIN (c) = 1;
break;
}
}
if (OMP_FOR_INIT (for_stmt) == NULL_TREE)
{
gcc_assert (TREE_CODE (for_stmt) != OACC_LOOP);
inner_for_stmt = walk_tree (&OMP_FOR_BODY (for_stmt),
find_combined_omp_for, NULL, NULL);
if (inner_for_stmt == NULL_TREE)
{
gcc_assert (seen_error ());
*expr_p = NULL_TREE;
return GS_ERROR;
}
}
if (TREE_CODE (for_stmt) != OMP_TASKLOOP)
gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p, ort,
TREE_CODE (for_stmt));
if (TREE_CODE (for_stmt) == OMP_DISTRIBUTE)
gimplify_omp_ctxp->distribute = true;
/* Handle OMP_FOR_INIT. */
for_pre_body = NULL;
if (ort == ORT_SIMD && OMP_FOR_PRE_BODY (for_stmt))
{
has_decl_expr = BITMAP_ALLOC (NULL);
if (TREE_CODE (OMP_FOR_PRE_BODY (for_stmt)) == DECL_EXPR
&& TREE_CODE (DECL_EXPR_DECL (OMP_FOR_PRE_BODY (for_stmt)))
== VAR_DECL)
{
t = OMP_FOR_PRE_BODY (for_stmt);
bitmap_set_bit (has_decl_expr, DECL_UID (DECL_EXPR_DECL (t)));
}
else if (TREE_CODE (OMP_FOR_PRE_BODY (for_stmt)) == STATEMENT_LIST)
{
tree_stmt_iterator si;
for (si = tsi_start (OMP_FOR_PRE_BODY (for_stmt)); !tsi_end_p (si);
tsi_next (&si))
{
t = tsi_stmt (si);
if (TREE_CODE (t) == DECL_EXPR
&& TREE_CODE (DECL_EXPR_DECL (t)) == VAR_DECL)
bitmap_set_bit (has_decl_expr, DECL_UID (DECL_EXPR_DECL (t)));
}
}
}
if (OMP_FOR_PRE_BODY (for_stmt))
{
if (TREE_CODE (for_stmt) != OMP_TASKLOOP || gimplify_omp_ctxp)
gimplify_and_add (OMP_FOR_PRE_BODY (for_stmt), &for_pre_body);
else
{
struct gimplify_omp_ctx ctx;
memset (&ctx, 0, sizeof (ctx));
ctx.region_type = ORT_NONE;
gimplify_omp_ctxp = &ctx;
gimplify_and_add (OMP_FOR_PRE_BODY (for_stmt), &for_pre_body);
gimplify_omp_ctxp = NULL;
}
}
OMP_FOR_PRE_BODY (for_stmt) = NULL_TREE;
if (OMP_FOR_INIT (for_stmt) == NULL_TREE)
for_stmt = inner_for_stmt;
/* For taskloop, need to gimplify the start, end and step before the
taskloop, outside of the taskloop omp context. */
if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP)
{
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
if (!is_gimple_constant (TREE_OPERAND (t, 1)))
{
TREE_OPERAND (t, 1)
= get_initialized_tmp_var (TREE_OPERAND (t, 1),
pre_p, NULL, false);
tree c = build_omp_clause (input_location,
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = TREE_OPERAND (t, 1);
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (orig_for_stmt);
OMP_FOR_CLAUSES (orig_for_stmt) = c;
}
/* Handle OMP_FOR_COND. */
t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
if (!is_gimple_constant (TREE_OPERAND (t, 1)))
{
TREE_OPERAND (t, 1)
= get_initialized_tmp_var (TREE_OPERAND (t, 1),
gimple_seq_empty_p (for_pre_body)
? pre_p : &for_pre_body, NULL,
false);
tree c = build_omp_clause (input_location,
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = TREE_OPERAND (t, 1);
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (orig_for_stmt);
OMP_FOR_CLAUSES (orig_for_stmt) = c;
}
/* Handle OMP_FOR_INCR. */
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
if (TREE_CODE (t) == MODIFY_EXPR)
{
decl = TREE_OPERAND (t, 0);
t = TREE_OPERAND (t, 1);
tree *tp = &TREE_OPERAND (t, 1);
if (TREE_CODE (t) == PLUS_EXPR && *tp == decl)
tp = &TREE_OPERAND (t, 0);
if (!is_gimple_constant (*tp))
{
gimple_seq *seq = gimple_seq_empty_p (for_pre_body)
? pre_p : &for_pre_body;
*tp = get_initialized_tmp_var (*tp, seq, NULL, false);
tree c = build_omp_clause (input_location,
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = *tp;
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (orig_for_stmt);
OMP_FOR_CLAUSES (orig_for_stmt) = c;
}
}
}
gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (orig_for_stmt), pre_p, ort,
OMP_TASKLOOP);
}
if (orig_for_stmt != for_stmt)
gimplify_omp_ctxp->combined_loop = true;
for_body = NULL;
gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
== TREE_VEC_LENGTH (OMP_FOR_COND (for_stmt)));
gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
== TREE_VEC_LENGTH (OMP_FOR_INCR (for_stmt)));
tree c = omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_ORDERED);
bool is_doacross = false;
if (c && OMP_CLAUSE_ORDERED_EXPR (c))
{
is_doacross = true;
gimplify_omp_ctxp->loop_iter_var.create (TREE_VEC_LENGTH
(OMP_FOR_INIT (for_stmt))
* 2);
}
int collapse = 1, tile = 0;
c = omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_COLLAPSE);
if (c)
collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (c));
c = omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_TILE);
if (c)
tile = list_length (OMP_CLAUSE_TILE_LIST (c));
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
decl = TREE_OPERAND (t, 0);
gcc_assert (DECL_P (decl));
gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl))
|| POINTER_TYPE_P (TREE_TYPE (decl)));
if (is_doacross)
{
if (TREE_CODE (for_stmt) == OMP_FOR && OMP_FOR_ORIG_DECLS (for_stmt))
gimplify_omp_ctxp->loop_iter_var.quick_push
(TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt), i));
else
gimplify_omp_ctxp->loop_iter_var.quick_push (decl);
gimplify_omp_ctxp->loop_iter_var.quick_push (decl);
}
/* Make sure the iteration variable is private. */
tree c = NULL_TREE;
tree c2 = NULL_TREE;
if (orig_for_stmt != for_stmt)
/* Do this only on innermost construct for combined ones. */;
else if (ort == ORT_SIMD)
{
splay_tree_node n = splay_tree_lookup (gimplify_omp_ctxp->variables,
(splay_tree_key) decl);
omp_is_private (gimplify_omp_ctxp, decl,
1 + (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
!= 1));
if (n != NULL && (n->value & GOVD_DATA_SHARE_CLASS) != 0)
omp_notice_variable (gimplify_omp_ctxp, decl, true);
else if (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1)
{
c = build_omp_clause (input_location, OMP_CLAUSE_LINEAR);
OMP_CLAUSE_LINEAR_NO_COPYIN (c) = 1;
unsigned int flags = GOVD_LINEAR | GOVD_EXPLICIT | GOVD_SEEN;
if (has_decl_expr
&& bitmap_bit_p (has_decl_expr, DECL_UID (decl)))
{
OMP_CLAUSE_LINEAR_NO_COPYOUT (c) = 1;
flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER;
}
struct gimplify_omp_ctx *outer
= gimplify_omp_ctxp->outer_context;
if (outer && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
{
if (outer->region_type == ORT_WORKSHARE
&& outer->combined_loop)
{
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n != NULL && (n->value & GOVD_LOCAL) != 0)
{
OMP_CLAUSE_LINEAR_NO_COPYOUT (c) = 1;
flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER;
}
else
{
struct gimplify_omp_ctx *octx = outer->outer_context;
if (octx
&& octx->region_type == ORT_COMBINED_PARALLEL
&& octx->outer_context
&& (octx->outer_context->region_type
== ORT_WORKSHARE)
&& octx->outer_context->combined_loop)
{
octx = octx->outer_context;
n = splay_tree_lookup (octx->variables,
(splay_tree_key)decl);
if (n != NULL && (n->value & GOVD_LOCAL) != 0)
{
OMP_CLAUSE_LINEAR_NO_COPYOUT (c) = 1;
flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER;
}
}
}
}
}
OMP_CLAUSE_DECL (c) = decl;
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (for_stmt);
OMP_FOR_CLAUSES (for_stmt) = c;
omp_add_variable (gimplify_omp_ctxp, decl, flags);
if (outer && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
{
if (outer->region_type == ORT_WORKSHARE
&& outer->combined_loop)
{
if (outer->outer_context
&& (outer->outer_context->region_type
== ORT_COMBINED_PARALLEL))
outer = outer->outer_context;
else if (omp_check_private (outer, decl, false))
outer = NULL;
}
else if (((outer->region_type & ORT_TASK) != 0)
&& outer->combined_loop
&& !omp_check_private (gimplify_omp_ctxp,
decl, false))
;
else if (outer->region_type != ORT_COMBINED_PARALLEL)
{
omp_notice_variable (outer, decl, true);
outer = NULL;
}
if (outer)
{
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0)
{
omp_add_variable (outer, decl,
GOVD_LASTPRIVATE | GOVD_SEEN);
if (outer->region_type == ORT_COMBINED_PARALLEL
&& outer->outer_context
&& (outer->outer_context->region_type
== ORT_WORKSHARE)
&& outer->outer_context->combined_loop)
{
outer = outer->outer_context;
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (omp_check_private (outer, decl, false))
outer = NULL;
else if (n == NULL
|| ((n->value & GOVD_DATA_SHARE_CLASS)
== 0))
omp_add_variable (outer, decl,
GOVD_LASTPRIVATE
| GOVD_SEEN);
else
outer = NULL;
}
if (outer && outer->outer_context
&& (outer->outer_context->region_type
== ORT_COMBINED_TEAMS))
{
outer = outer->outer_context;
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n == NULL
|| (n->value & GOVD_DATA_SHARE_CLASS) == 0)
omp_add_variable (outer, decl,
GOVD_SHARED | GOVD_SEEN);
else
outer = NULL;
}
if (outer && outer->outer_context)
omp_notice_variable (outer->outer_context, decl,
true);
}
}
}
}
else
{
bool lastprivate
= (!has_decl_expr
|| !bitmap_bit_p (has_decl_expr, DECL_UID (decl)));
struct gimplify_omp_ctx *outer
= gimplify_omp_ctxp->outer_context;
if (outer && lastprivate)
{
if (outer->region_type == ORT_WORKSHARE
&& outer->combined_loop)
{
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n != NULL && (n->value & GOVD_LOCAL) != 0)
{
lastprivate = false;
outer = NULL;
}
else if (outer->outer_context
&& (outer->outer_context->region_type
== ORT_COMBINED_PARALLEL))
outer = outer->outer_context;
else if (omp_check_private (outer, decl, false))
outer = NULL;
}
else if (((outer->region_type & ORT_TASK) != 0)
&& outer->combined_loop
&& !omp_check_private (gimplify_omp_ctxp,
decl, false))
;
else if (outer->region_type != ORT_COMBINED_PARALLEL)
{
omp_notice_variable (outer, decl, true);
outer = NULL;
}
if (outer)
{
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0)
{
omp_add_variable (outer, decl,
GOVD_LASTPRIVATE | GOVD_SEEN);
if (outer->region_type == ORT_COMBINED_PARALLEL
&& outer->outer_context
&& (outer->outer_context->region_type
== ORT_WORKSHARE)
&& outer->outer_context->combined_loop)
{
outer = outer->outer_context;
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (omp_check_private (outer, decl, false))
outer = NULL;
else if (n == NULL
|| ((n->value & GOVD_DATA_SHARE_CLASS)
== 0))
omp_add_variable (outer, decl,
GOVD_LASTPRIVATE
| GOVD_SEEN);
else
outer = NULL;
}
if (outer && outer->outer_context
&& (outer->outer_context->region_type
== ORT_COMBINED_TEAMS))
{
outer = outer->outer_context;
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n == NULL
|| (n->value & GOVD_DATA_SHARE_CLASS) == 0)
omp_add_variable (outer, decl,
GOVD_SHARED | GOVD_SEEN);
else
outer = NULL;
}
if (outer && outer->outer_context)
omp_notice_variable (outer->outer_context, decl,
true);
}
}
}
c = build_omp_clause (input_location,
lastprivate ? OMP_CLAUSE_LASTPRIVATE
: OMP_CLAUSE_PRIVATE);
OMP_CLAUSE_DECL (c) = decl;
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (for_stmt);
OMP_FOR_CLAUSES (for_stmt) = c;
omp_add_variable (gimplify_omp_ctxp, decl,
(lastprivate ? GOVD_LASTPRIVATE : GOVD_PRIVATE)
| GOVD_EXPLICIT | GOVD_SEEN);
c = NULL_TREE;
}
}
else if (omp_is_private (gimplify_omp_ctxp, decl, 0))
omp_notice_variable (gimplify_omp_ctxp, decl, true);
else
omp_add_variable (gimplify_omp_ctxp, decl, GOVD_PRIVATE | GOVD_SEEN);
/* If DECL is not a gimple register, create a temporary variable to act
as an iteration counter. This is valid, since DECL cannot be
modified in the body of the loop. Similarly for any iteration vars
in simd with collapse > 1 where the iterator vars must be
lastprivate. */
if (orig_for_stmt != for_stmt)
var = decl;
else if (!is_gimple_reg (decl)
|| (ort == ORT_SIMD
&& TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) > 1))
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
/* Make sure omp_add_variable is not called on it prematurely.
We call it ourselves a few lines later. */
gimplify_omp_ctxp = NULL;
var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
gimplify_omp_ctxp = ctx;
TREE_OPERAND (t, 0) = var;
gimplify_seq_add_stmt (&for_body, gimple_build_assign (decl, var));
if (ort == ORT_SIMD
&& TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1)
{
c2 = build_omp_clause (input_location, OMP_CLAUSE_LINEAR);
OMP_CLAUSE_LINEAR_NO_COPYIN (c2) = 1;
OMP_CLAUSE_LINEAR_NO_COPYOUT (c2) = 1;
OMP_CLAUSE_DECL (c2) = var;
OMP_CLAUSE_CHAIN (c2) = OMP_FOR_CLAUSES (for_stmt);
OMP_FOR_CLAUSES (for_stmt) = c2;
omp_add_variable (gimplify_omp_ctxp, var,
GOVD_LINEAR | GOVD_EXPLICIT | GOVD_SEEN);
if (c == NULL_TREE)
{
c = c2;
c2 = NULL_TREE;
}
}
else
omp_add_variable (gimplify_omp_ctxp, var,
GOVD_PRIVATE | GOVD_SEEN);
}
else
var = decl;
tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
is_gimple_val, fb_rvalue, false);
ret = MIN (ret, tret);
if (ret == GS_ERROR)
return ret;
/* Handle OMP_FOR_COND. */
t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
gcc_assert (COMPARISON_CLASS_P (t));
gcc_assert (TREE_OPERAND (t, 0) == decl);
tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
is_gimple_val, fb_rvalue, false);
ret = MIN (ret, tret);
/* Handle OMP_FOR_INCR. */
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
switch (TREE_CODE (t))
{
case PREINCREMENT_EXPR:
case POSTINCREMENT_EXPR:
{
tree decl = TREE_OPERAND (t, 0);
/* c_omp_for_incr_canonicalize_ptr() should have been
called to massage things appropriately. */
gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl)));
if (orig_for_stmt != for_stmt)
break;
t = build_int_cst (TREE_TYPE (decl), 1);
if (c)
OMP_CLAUSE_LINEAR_STEP (c) = t;
t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
t = build2 (MODIFY_EXPR, TREE_TYPE (var), var, t);
TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
break;
}
case PREDECREMENT_EXPR:
case POSTDECREMENT_EXPR:
/* c_omp_for_incr_canonicalize_ptr() should have been
called to massage things appropriately. */
gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl)));
if (orig_for_stmt != for_stmt)
break;
t = build_int_cst (TREE_TYPE (decl), -1);
if (c)
OMP_CLAUSE_LINEAR_STEP (c) = t;
t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
t = build2 (MODIFY_EXPR, TREE_TYPE (var), var, t);
TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
break;
case MODIFY_EXPR:
gcc_assert (TREE_OPERAND (t, 0) == decl);
TREE_OPERAND (t, 0) = var;
t = TREE_OPERAND (t, 1);
switch (TREE_CODE (t))
{
case PLUS_EXPR:
if (TREE_OPERAND (t, 1) == decl)
{
TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
TREE_OPERAND (t, 0) = var;
break;
}
/* Fallthru. */
case MINUS_EXPR:
case POINTER_PLUS_EXPR:
gcc_assert (TREE_OPERAND (t, 0) == decl);
TREE_OPERAND (t, 0) = var;
break;
default:
gcc_unreachable ();
}
tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
is_gimple_val, fb_rvalue, false);
ret = MIN (ret, tret);
if (c)
{
tree step = TREE_OPERAND (t, 1);
tree stept = TREE_TYPE (decl);
if (POINTER_TYPE_P (stept))
stept = sizetype;
step = fold_convert (stept, step);
if (TREE_CODE (t) == MINUS_EXPR)
step = fold_build1 (NEGATE_EXPR, stept, step);
OMP_CLAUSE_LINEAR_STEP (c) = step;
if (step != TREE_OPERAND (t, 1))
{
tret = gimplify_expr (&OMP_CLAUSE_LINEAR_STEP (c),
&for_pre_body, NULL,
is_gimple_val, fb_rvalue, false);
ret = MIN (ret, tret);
}
}
break;
default:
gcc_unreachable ();
}
if (c2)
{
gcc_assert (c);
OMP_CLAUSE_LINEAR_STEP (c2) = OMP_CLAUSE_LINEAR_STEP (c);
}
if ((var != decl || collapse > 1 || tile) && orig_for_stmt == for_stmt)
{
for (c = OMP_FOR_CLAUSES (for_stmt); c ; c = OMP_CLAUSE_CHAIN (c))
if (((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) == NULL)
|| (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) == NULL))
&& OMP_CLAUSE_DECL (c) == decl)
{
if (is_doacross && (collapse == 1 || i >= collapse))
t = var;
else
{
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
gcc_assert (TREE_OPERAND (t, 0) == var);
t = TREE_OPERAND (t, 1);
gcc_assert (TREE_CODE (t) == PLUS_EXPR
|| TREE_CODE (t) == MINUS_EXPR
|| TREE_CODE (t) == POINTER_PLUS_EXPR);
gcc_assert (TREE_OPERAND (t, 0) == var);
t = build2 (TREE_CODE (t), TREE_TYPE (decl),
is_doacross ? var : decl,
TREE_OPERAND (t, 1));
}
gimple_seq *seq;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
seq = &OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c);
else
seq = &OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c);
gimplify_assign (decl, t, seq);
}
}
}
BITMAP_FREE (has_decl_expr);
if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP)
{
push_gimplify_context ();
if (TREE_CODE (OMP_FOR_BODY (orig_for_stmt)) != BIND_EXPR)
{
OMP_FOR_BODY (orig_for_stmt)
= build3 (BIND_EXPR, void_type_node, NULL,
OMP_FOR_BODY (orig_for_stmt), NULL);
TREE_SIDE_EFFECTS (OMP_FOR_BODY (orig_for_stmt)) = 1;
}
}
gimple *g = gimplify_and_return_first (OMP_FOR_BODY (orig_for_stmt),
&for_body);
if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP)
{
if (gimple_code (g) == GIMPLE_BIND)
pop_gimplify_context (g);
else
pop_gimplify_context (NULL);
}
if (orig_for_stmt != for_stmt)
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
decl = TREE_OPERAND (t, 0);
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP)
gimplify_omp_ctxp = ctx->outer_context;
var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
gimplify_omp_ctxp = ctx;
omp_add_variable (gimplify_omp_ctxp, var, GOVD_PRIVATE | GOVD_SEEN);
TREE_OPERAND (t, 0) = var;
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
TREE_OPERAND (t, 1) = copy_node (TREE_OPERAND (t, 1));
TREE_OPERAND (TREE_OPERAND (t, 1), 0) = var;
}
gimplify_adjust_omp_clauses (pre_p, for_body,
&OMP_FOR_CLAUSES (orig_for_stmt),
TREE_CODE (orig_for_stmt));
int kind;
switch (TREE_CODE (orig_for_stmt))
{
case OMP_FOR: kind = GF_OMP_FOR_KIND_FOR; break;
case OMP_SIMD: kind = GF_OMP_FOR_KIND_SIMD; break;
case CILK_SIMD: kind = GF_OMP_FOR_KIND_CILKSIMD; break;
case CILK_FOR: kind = GF_OMP_FOR_KIND_CILKFOR; break;
case OMP_DISTRIBUTE: kind = GF_OMP_FOR_KIND_DISTRIBUTE; break;
case OMP_TASKLOOP: kind = GF_OMP_FOR_KIND_TASKLOOP; break;
case OACC_LOOP: kind = GF_OMP_FOR_KIND_OACC_LOOP; break;
default:
gcc_unreachable ();
}
gfor = gimple_build_omp_for (for_body, kind, OMP_FOR_CLAUSES (orig_for_stmt),
TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)),
for_pre_body);
if (orig_for_stmt != for_stmt)
gimple_omp_for_set_combined_p (gfor, true);
if (gimplify_omp_ctxp
&& (gimplify_omp_ctxp->combined_loop
|| (gimplify_omp_ctxp->region_type == ORT_COMBINED_PARALLEL
&& gimplify_omp_ctxp->outer_context
&& gimplify_omp_ctxp->outer_context->combined_loop)))
{
gimple_omp_for_set_combined_into_p (gfor, true);
if (gimplify_omp_ctxp->combined_loop)
gcc_assert (TREE_CODE (orig_for_stmt) == OMP_SIMD);
else
gcc_assert (TREE_CODE (orig_for_stmt) == OMP_FOR);
}
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
gimple_omp_for_set_index (gfor, i, TREE_OPERAND (t, 0));
gimple_omp_for_set_initial (gfor, i, TREE_OPERAND (t, 1));
t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
gimple_omp_for_set_cond (gfor, i, TREE_CODE (t));
gimple_omp_for_set_final (gfor, i, TREE_OPERAND (t, 1));
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
gimple_omp_for_set_incr (gfor, i, TREE_OPERAND (t, 1));
}
/* OMP_TASKLOOP is gimplified as two GIMPLE_OMP_FOR taskloop
constructs with GIMPLE_OMP_TASK sandwiched in between them.
The outer taskloop stands for computing the number of iterations,
counts for collapsed loops and holding taskloop specific clauses.
The task construct stands for the effect of data sharing on the
explicit task it creates and the inner taskloop stands for expansion
of the static loop inside of the explicit task construct. */
if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP)
{
tree *gfor_clauses_ptr = gimple_omp_for_clauses_ptr (gfor);
tree task_clauses = NULL_TREE;
tree c = *gfor_clauses_ptr;
tree *gtask_clauses_ptr = &task_clauses;
tree outer_for_clauses = NULL_TREE;
tree *gforo_clauses_ptr = &outer_for_clauses;
for (; c; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
/* These clauses are allowed on task, move them there. */
case OMP_CLAUSE_SHARED:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PRIORITY:
*gtask_clauses_ptr = c;
gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
break;
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_PRIVATE_TASKLOOP_IV (c))
{
/* We want private on outer for and firstprivate
on task. */
*gtask_clauses_ptr
= build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (*gtask_clauses_ptr) = OMP_CLAUSE_DECL (c);
lang_hooks.decls.omp_finish_clause (*gtask_clauses_ptr, NULL);
gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr);
*gforo_clauses_ptr = c;
gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
}
else
{
*gtask_clauses_ptr = c;
gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
}
break;
/* These clauses go into outer taskloop clauses. */
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_NOGROUP:
*gforo_clauses_ptr = c;
gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
break;
/* Taskloop clause we duplicate on both taskloops. */
case OMP_CLAUSE_COLLAPSE:
*gfor_clauses_ptr = c;
gfor_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
*gforo_clauses_ptr = copy_node (c);
gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (*gforo_clauses_ptr);
break;
/* For lastprivate, keep the clause on inner taskloop, and add
a shared clause on task. If the same decl is also firstprivate,
add also firstprivate clause on the inner taskloop. */
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_TASKLOOP_IV (c))
{
/* For taskloop C++ lastprivate IVs, we want:
1) private on outer taskloop
2) firstprivate and shared on task
3) lastprivate on inner taskloop */
*gtask_clauses_ptr
= build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (*gtask_clauses_ptr) = OMP_CLAUSE_DECL (c);
lang_hooks.decls.omp_finish_clause (*gtask_clauses_ptr, NULL);
gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr);
OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c) = 1;
*gforo_clauses_ptr = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_PRIVATE);
OMP_CLAUSE_DECL (*gforo_clauses_ptr) = OMP_CLAUSE_DECL (c);
OMP_CLAUSE_PRIVATE_TASKLOOP_IV (*gforo_clauses_ptr) = 1;
TREE_TYPE (*gforo_clauses_ptr) = TREE_TYPE (c);
gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (*gforo_clauses_ptr);
}
*gfor_clauses_ptr = c;
gfor_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
*gtask_clauses_ptr
= build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_SHARED);
OMP_CLAUSE_DECL (*gtask_clauses_ptr) = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
OMP_CLAUSE_SHARED_FIRSTPRIVATE (*gtask_clauses_ptr) = 1;
gtask_clauses_ptr
= &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr);
break;
default:
gcc_unreachable ();
}
*gfor_clauses_ptr = NULL_TREE;
*gtask_clauses_ptr = NULL_TREE;
*gforo_clauses_ptr = NULL_TREE;
g = gimple_build_bind (NULL_TREE, gfor, NULL_TREE);
g = gimple_build_omp_task (g, task_clauses, NULL_TREE, NULL_TREE,
NULL_TREE, NULL_TREE, NULL_TREE);
gimple_omp_task_set_taskloop_p (g, true);
g = gimple_build_bind (NULL_TREE, g, NULL_TREE);
gomp_for *gforo
= gimple_build_omp_for (g, GF_OMP_FOR_KIND_TASKLOOP, outer_for_clauses,
gimple_omp_for_collapse (gfor),
gimple_omp_for_pre_body (gfor));
gimple_omp_for_set_pre_body (gfor, NULL);
gimple_omp_for_set_combined_p (gforo, true);
gimple_omp_for_set_combined_into_p (gfor, true);
for (i = 0; i < (int) gimple_omp_for_collapse (gfor); i++)
{
tree type = TREE_TYPE (gimple_omp_for_index (gfor, i));
tree v = create_tmp_var (type);
gimple_omp_for_set_index (gforo, i, v);
t = unshare_expr (gimple_omp_for_initial (gfor, i));
gimple_omp_for_set_initial (gforo, i, t);
gimple_omp_for_set_cond (gforo, i,
gimple_omp_for_cond (gfor, i));
t = unshare_expr (gimple_omp_for_final (gfor, i));
gimple_omp_for_set_final (gforo, i, t);
t = unshare_expr (gimple_omp_for_incr (gfor, i));
gcc_assert (TREE_OPERAND (t, 0) == gimple_omp_for_index (gfor, i));
TREE_OPERAND (t, 0) = v;
gimple_omp_for_set_incr (gforo, i, t);
t = build_omp_clause (input_location, OMP_CLAUSE_PRIVATE);
OMP_CLAUSE_DECL (t) = v;
OMP_CLAUSE_CHAIN (t) = gimple_omp_for_clauses (gforo);
gimple_omp_for_set_clauses (gforo, t);
}
gimplify_seq_add_stmt (pre_p, gforo);
}
else
gimplify_seq_add_stmt (pre_p, gfor);
if (ret != GS_ALL_DONE)
return GS_ERROR;
*expr_p = NULL_TREE;
return GS_ALL_DONE;
}
/* Helper function of optimize_target_teams, find OMP_TEAMS inside
of OMP_TARGET's body. */
static tree
find_omp_teams (tree *tp, int *walk_subtrees, void *)
{
  /* walk_tree callback: stop and report an OMP_TEAMS node when found;
     only descend through wrappers that may contain it.  */
  enum tree_code code = TREE_CODE (*tp);
  if (code == OMP_TEAMS)
    {
      *walk_subtrees = 0;
      return *tp;
    }
  /* Walk into binds and statement lists; everything else is opaque.  */
  *walk_subtrees = (code == BIND_EXPR || code == STATEMENT_LIST) ? 1 : 0;
  return NULL_TREE;
}
/* Helper function of optimize_target_teams, determine if the expression
can be computed safely before the target construct on the host. */
static tree
computable_teams_clause (tree *tp, int *walk_subtrees, void *)
{
  splay_tree_node n;
  /* Bare types carry no runtime computation; nothing to inspect.  */
  if (TYPE_P (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }
  /* walk_tree convention: returning *TP (non-NULL) flags the expression
     as NOT safely computable on the host and aborts the walk; returning
     NULL_TREE accepts this subtree.  */
  switch (TREE_CODE (*tp))
    {
    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
      *walk_subtrees = 0;
      /* Only plain integral decls qualify; value-exprs, TLS, volatile
	 or side-effecting decls can't be read safely beforehand.  */
      if (error_operand_p (*tp)
	  || !INTEGRAL_TYPE_P (TREE_TYPE (*tp))
	  || DECL_HAS_VALUE_EXPR_P (*tp)
	  || DECL_THREAD_LOCAL_P (*tp)
	  || TREE_SIDE_EFFECTS (*tp)
	  || TREE_THIS_VOLATILE (*tp))
	return *tp;
      /* Reject globals marked for the device via "omp declare target"
	 (or "... link") attributes.  */
      if (is_global_var (*tp)
	  && (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (*tp))
	      || lookup_attribute ("omp declare target link",
				   DECL_ATTRIBUTES (*tp))))
	return *tp;
      /* Reject a function-local variable not yet seen in any BIND_EXPR
	 (i.e. not yet in scope at this point).  */
      if (VAR_P (*tp)
	  && !DECL_SEEN_IN_BIND_EXPR_P (*tp)
	  && !is_global_var (*tp)
	  && decl_function_context (*tp) == current_function_decl)
	return *tp;
      /* Consult the target context's data-sharing record for the decl:
	 firstprivate or map(always,to:)-style mappings keep the host
	 value usable; GOVD_LOCAL decls do not.  */
      n = splay_tree_lookup (gimplify_omp_ctxp->variables,
			     (splay_tree_key) *tp);
      if (n == NULL)
	{
	  if (gimplify_omp_ctxp->target_map_scalars_firstprivate)
	    return NULL_TREE;
	  return *tp;
	}
      else if (n->value & GOVD_LOCAL)
	return *tp;
      else if (n->value & GOVD_FIRSTPRIVATE)
	return NULL_TREE;
      else if ((n->value & (GOVD_MAP | GOVD_MAP_ALWAYS_TO))
	       == (GOVD_MAP | GOVD_MAP_ALWAYS_TO))
	return NULL_TREE;
      return *tp;
    case INTEGER_CST:
      /* Integral constants are trivially computable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (*tp)))
	return *tp;
      return NULL_TREE;
    case TARGET_EXPR:
      /* Look through a TARGET_EXPR that is just an uninitialized
	 temporary slot, by checking the slot itself.  */
      if (TARGET_EXPR_INITIAL (*tp)
	  || TREE_CODE (TARGET_EXPR_SLOT (*tp)) != VAR_DECL)
	return *tp;
      return computable_teams_clause (&TARGET_EXPR_SLOT (*tp),
				      walk_subtrees, NULL);
    /* Allow some reasonable subset of integral arithmetics.  */
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case RDIV_EXPR:
    case EXACT_DIV_EXPR:
    case MIN_EXPR:
    case MAX_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_AND_EXPR:
    case NEGATE_EXPR:
    case ABS_EXPR:
    case BIT_NOT_EXPR:
    case NON_LVALUE_EXPR:
    CASE_CONVERT:
      /* Operands are checked recursively by the tree walk; only the
	 result type needs validating here.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (*tp)))
	return *tp;
      return NULL_TREE;
    /* And disallow anything else, except for comparisons.  */
    default:
      if (COMPARISON_CLASS_P (*tp))
	return NULL_TREE;
      return *tp;
    }
}
/* Try to determine if the num_teams and/or thread_limit expressions
can have their values determined already before entering the
target construct.
INTEGER_CSTs trivially are,
integral decls that are firstprivate (explicitly or implicitly)
or explicitly map(always, to:) or map(always, tofrom:) on the target
region too, and expressions involving simple arithmetics on those
too, function calls are not ok, dereferencing something neither etc.
Add NUM_TEAMS and THREAD_LIMIT clauses to the OMP_CLAUSES of
EXPR based on what we find:
0 stands for clause not specified at all, use implementation default
-1 stands for value that can't be determined easily before entering
the target construct.
If teams construct is not present at all, use 1 for num_teams
and 0 for thread_limit (only one team is involved, and the thread
   limit is implementation defined).  */
static void
optimize_target_teams (tree target, gimple_seq *pre_p)
{
  tree body = OMP_BODY (target);
  tree teams = walk_tree (&body, find_omp_teams, NULL, NULL);
  /* Defaults: 0 = clause not specified at all (see function comment).  */
  tree num_teams = integer_zero_node;
  tree thread_limit = integer_zero_node;
  location_t num_teams_loc = EXPR_LOCATION (target);
  location_t thread_limit_loc = EXPR_LOCATION (target);
  tree c, *p, expr;
  struct gimplify_omp_ctx *target_ctx = gimplify_omp_ctxp;
  if (teams == NULL_TREE)
    /* No teams construct inside target: exactly one team.  */
    num_teams = integer_one_node;
  else
    for (c = OMP_TEAMS_CLAUSES (teams); c; c = OMP_CLAUSE_CHAIN (c))
      {
	/* P points at whichever of the two tracked values this
	   clause provides; skip unrelated clauses.  */
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_TEAMS)
	  {
	    p = &num_teams;
	    num_teams_loc = OMP_CLAUSE_LOCATION (c);
	  }
	else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREAD_LIMIT)
	  {
	    p = &thread_limit;
	    thread_limit_loc = OMP_CLAUSE_LOCATION (c);
	  }
	else
	  continue;
	expr = OMP_CLAUSE_OPERAND (c, 0);
	/* Constants are usable as-is.  */
	if (TREE_CODE (expr) == INTEGER_CST)
	  {
	    *p = expr;
	    continue;
	  }
	/* If any subexpression is not computable on the host before the
	   target region, record -1 (not determinable beforehand).  */
	if (walk_tree (&expr, computable_teams_clause, NULL, NULL))
	  {
	    *p = integer_minus_one_node;
	    continue;
	  }
	*p = expr;
	/* Gimplify the expression in the context enclosing the target,
	   since it will be evaluated before entering the region; restore
	   the target context afterwards on both paths.  */
	gimplify_omp_ctxp = gimplify_omp_ctxp->outer_context;
	if (gimplify_expr (p, pre_p, NULL, is_gimple_val, fb_rvalue, false)
	    == GS_ERROR)
	  {
	    gimplify_omp_ctxp = target_ctx;
	    *p = integer_minus_one_node;
	    continue;
	  }
	gimplify_omp_ctxp = target_ctx;
	/* Update the clause operand on the teams construct with the
	   gimplified value, except for bare decls/TARGET_EXPRs.  */
	if (!DECL_P (expr) && TREE_CODE (expr) != TARGET_EXPR)
	  OMP_CLAUSE_OPERAND (c, 0) = *p;
      }
  /* Attach the computed THREAD_LIMIT and NUM_TEAMS clauses to the
     target construct itself.  */
  c = build_omp_clause (thread_limit_loc, OMP_CLAUSE_THREAD_LIMIT);
  OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = thread_limit;
  OMP_CLAUSE_CHAIN (c) = OMP_TARGET_CLAUSES (target);
  OMP_TARGET_CLAUSES (target) = c;
  c = build_omp_clause (num_teams_loc, OMP_CLAUSE_NUM_TEAMS);
  OMP_CLAUSE_NUM_TEAMS_EXPR (c) = num_teams;
  OMP_CLAUSE_CHAIN (c) = OMP_TARGET_CLAUSES (target);
  OMP_TARGET_CLAUSES (target) = c;
}
/* Gimplify the gross structure of several OMP constructs. */
static void
gimplify_omp_workshare (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  gimple *stmt;
  gimple_seq body = NULL;
  enum omp_region_type ort;
  /* Select the region type used when scanning the clauses.  */
  switch (TREE_CODE (expr))
    {
    case OMP_SECTIONS:
    case OMP_SINGLE:
      ort = ORT_WORKSHARE;
      break;
    case OMP_TARGET:
      ort = OMP_TARGET_COMBINED (expr) ? ORT_COMBINED_TARGET : ORT_TARGET;
      break;
    case OACC_KERNELS:
      ort = ORT_ACC_KERNELS;
      break;
    case OACC_PARALLEL:
      ort = ORT_ACC_PARALLEL;
      break;
    case OACC_DATA:
      ort = ORT_ACC_DATA;
      break;
    case OMP_TARGET_DATA:
      ort = ORT_TARGET_DATA;
      break;
    case OMP_TEAMS:
      ort = OMP_TEAMS_COMBINED (expr) ? ORT_COMBINED_TEAMS : ORT_TEAMS;
      break;
    case OACC_HOST_DATA:
      ort = ORT_ACC_HOST_DATA;
      break;
    default:
      gcc_unreachable ();
    }
  gimplify_scan_omp_clauses (&OMP_CLAUSES (expr), pre_p, ort,
			     TREE_CODE (expr));
  /* For target constructs, try to precompute num_teams/thread_limit
     on the host (see optimize_target_teams).  */
  if (TREE_CODE (expr) == OMP_TARGET)
    optimize_target_teams (expr, pre_p);
  if ((ort & (ORT_TARGET | ORT_TARGET_DATA)) != 0)
    {
      /* Gimplify the body in its own gimplification context; pop with
	 the resulting bind if one was produced.  */
      push_gimplify_context ();
      gimple *g = gimplify_and_return_first (OMP_BODY (expr), &body);
      if (gimple_code (g) == GIMPLE_BIND)
	pop_gimplify_context (g);
      else
	pop_gimplify_context (NULL);
      if ((ort & ORT_TARGET_DATA) != 0)
	{
	  /* Data regions need an explicit end-of-region builtin call;
	     wrap the body in a GIMPLE_TRY_FINALLY so it is emitted even
	     when the body is left abnormally.  */
	  enum built_in_function end_ix;
	  switch (TREE_CODE (expr))
	    {
	    case OACC_DATA:
	    case OACC_HOST_DATA:
	      end_ix = BUILT_IN_GOACC_DATA_END;
	      break;
	    case OMP_TARGET_DATA:
	      end_ix = BUILT_IN_GOMP_TARGET_END_DATA;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  tree fn = builtin_decl_explicit (end_ix);
	  g = gimple_build_call (fn, 0);
	  gimple_seq cleanup = NULL;
	  gimple_seq_add_stmt (&cleanup, g);
	  g = gimple_build_try (body, cleanup, GIMPLE_TRY_FINALLY);
	  body = NULL;
	  gimple_seq_add_stmt (&body, g);
	}
    }
  else
    gimplify_and_add (OMP_BODY (expr), &body);
  gimplify_adjust_omp_clauses (pre_p, body, &OMP_CLAUSES (expr),
			       TREE_CODE (expr));
  /* Build the GIMPLE statement matching the construct kind.  */
  switch (TREE_CODE (expr))
    {
    case OACC_DATA:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_DATA,
				      OMP_CLAUSES (expr));
      break;
    case OACC_KERNELS:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_KERNELS,
				      OMP_CLAUSES (expr));
      break;
    case OACC_HOST_DATA:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_HOST_DATA,
				      OMP_CLAUSES (expr));
      break;
    case OACC_PARALLEL:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_PARALLEL,
				      OMP_CLAUSES (expr));
      break;
    case OMP_SECTIONS:
      stmt = gimple_build_omp_sections (body, OMP_CLAUSES (expr));
      break;
    case OMP_SINGLE:
      stmt = gimple_build_omp_single (body, OMP_CLAUSES (expr));
      break;
    case OMP_TARGET:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_REGION,
				      OMP_CLAUSES (expr));
      break;
    case OMP_TARGET_DATA:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_DATA,
				      OMP_CLAUSES (expr));
      break;
    case OMP_TEAMS:
      stmt = gimple_build_omp_teams (body, OMP_CLAUSES (expr));
      break;
    default:
      gcc_unreachable ();
    }
  gimplify_seq_add_stmt (pre_p, stmt);
  *expr_p = NULL_TREE;
}
/* Gimplify the gross structure of OpenACC enter/exit data, update, and OpenMP
target update constructs. */
static void
gimplify_omp_target_update (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  gomp_target *stmt;
  enum omp_region_type ort = ORT_WORKSHARE;
  int kind;

  /* Translate the construct's tree code into the corresponding GIMPLE
     target kind; OpenACC constructs additionally use the ACC region
     type for clause scanning.  */
  enum tree_code code = TREE_CODE (expr);
  if (code == OACC_ENTER_DATA || code == OACC_EXIT_DATA)
    {
      kind = GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA;
      ort = ORT_ACC;
    }
  else if (code == OACC_UPDATE)
    {
      kind = GF_OMP_TARGET_KIND_OACC_UPDATE;
      ort = ORT_ACC;
    }
  else if (code == OMP_TARGET_UPDATE)
    kind = GF_OMP_TARGET_KIND_UPDATE;
  else if (code == OMP_TARGET_ENTER_DATA)
    kind = GF_OMP_TARGET_KIND_ENTER_DATA;
  else if (code == OMP_TARGET_EXIT_DATA)
    kind = GF_OMP_TARGET_KIND_EXIT_DATA;
  else
    gcc_unreachable ();

  /* These constructs are standalone: scan and finalize the clauses,
     then emit a bodyless GIMPLE target statement.  */
  gimplify_scan_omp_clauses (&OMP_STANDALONE_CLAUSES (expr), pre_p,
			     ort, code);
  gimplify_adjust_omp_clauses (pre_p, NULL, &OMP_STANDALONE_CLAUSES (expr),
			       code);
  stmt = gimple_build_omp_target (NULL, kind, OMP_STANDALONE_CLAUSES (expr));
  gimplify_seq_add_stmt (pre_p, stmt);
  *expr_p = NULL_TREE;
}
/* A subroutine of gimplify_omp_atomic. The front end is supposed to have
stabilized the lhs of the atomic operation as *ADDR. Return true if
EXPR is this stabilized form. */
static bool
goa_lhs_expr_p (tree expr, tree addr)
{
  /* Also include casts to other type variants.  The C front end is fond
     of adding these for e.g. volatile variables.  This is like
     STRIP_TYPE_NOPS but includes the main variant lookup.  */
  STRIP_USELESS_TYPE_CONVERSION (expr);

  if (TREE_CODE (expr) != INDIRECT_REF)
    /* EXPR is not a dereference; it matches only if it is the object
       whose address ADDR takes.  */
    return TREE_CODE (addr) == ADDR_EXPR && expr == TREE_OPERAND (addr, 0);

  /* EXPR is *P: peel identical conversions off P and ADDR in lockstep
     until they either meet or diverge.  */
  expr = TREE_OPERAND (expr, 0);
  while (expr != addr
	 && (CONVERT_EXPR_P (expr)
	     || TREE_CODE (expr) == NON_LVALUE_EXPR)
	 && TREE_CODE (expr) == TREE_CODE (addr)
	 && types_compatible_p (TREE_TYPE (expr), TREE_TYPE (addr)))
    {
      expr = TREE_OPERAND (expr, 0);
      addr = TREE_OPERAND (addr, 0);
    }
  if (expr == addr)
    return true;
  /* Also accept two distinct ADDR_EXPRs of the same object.  */
  return (TREE_CODE (addr) == ADDR_EXPR
	  && TREE_CODE (expr) == ADDR_EXPR
	  && TREE_OPERAND (addr, 0) == TREE_OPERAND (expr, 0));
}
/* Walk *EXPR_P and replace appearances of *LHS_ADDR with LHS_VAR. If an
expression does not involve the lhs, evaluate it into a temporary.
Return 1 if the lhs appeared as a subexpression, 0 if it did not,
or -1 if an error was encountered. */
static int
goa_stabilize_expr (tree *expr_p, gimple_seq *pre_p, tree lhs_addr,
		    tree lhs_var)
{
  tree expr = *expr_p;
  int saw_lhs;
  /* The whole expression is the stabilized lhs: substitute LHS_VAR.  */
  if (goa_lhs_expr_p (expr, lhs_addr))
    {
      *expr_p = lhs_var;
      return 1;
    }
  /* Already a gimple value; nothing to evaluate and no lhs inside.  */
  if (is_gimple_val (expr))
    return 0;
  saw_lhs = 0;
  /* Recurse into operands, accumulating whether the lhs appeared in
     any of them.  Note the deliberate fallthroughs: binary/comparison
     nodes handle operand 1 first and fall into the unary case for
     operand 0.  */
  switch (TREE_CODE_CLASS (TREE_CODE (expr)))
    {
    case tcc_binary:
    case tcc_comparison:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p, lhs_addr,
				     lhs_var);
      /* FALLTHRU */
    case tcc_unary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p, lhs_addr,
				     lhs_var);
      break;
    case tcc_expression:
      switch (TREE_CODE (expr))
	{
	case TRUTH_ANDIF_EXPR:
	case TRUTH_ORIF_EXPR:
	case TRUTH_AND_EXPR:
	case TRUTH_OR_EXPR:
	case TRUTH_XOR_EXPR:
	  saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p,
					 lhs_addr, lhs_var);
	  /* FALLTHRU */
	case TRUTH_NOT_EXPR:
	  saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
					 lhs_addr, lhs_var);
	  break;
	case COMPOUND_EXPR:
	  /* Break out any preevaluations from cp_build_modify_expr.  */
	  for (; TREE_CODE (expr) == COMPOUND_EXPR;
	       expr = TREE_OPERAND (expr, 1))
	    gimplify_stmt (&TREE_OPERAND (expr, 0), pre_p);
	  *expr_p = expr;
	  return goa_stabilize_expr (expr_p, pre_p, lhs_addr, lhs_var);
	default:
	  break;
	}
      break;
    default:
      break;
    }
  /* The lhs occurred nowhere in this expression: evaluate it into a
     temporary before the atomic statement; -1 signals an error.  */
  if (saw_lhs == 0)
    {
      enum gimplify_status gs;
      gs = gimplify_expr (expr_p, pre_p, NULL, is_gimple_val, fb_rvalue);
      if (gs != GS_ALL_DONE)
	saw_lhs = -1;
    }
  return saw_lhs;
}
/* Gimplify an OMP_ATOMIC statement. */
static enum gimplify_status
gimplify_omp_atomic (tree *expr_p, gimple_seq *pre_p)
{
  /* The kind of atomic operation (read, capture-old, capture-new, ...)
     is encoded in the tree code; *EXPR_P is only rewritten at the end,
     so cache the code up front.  */
  enum tree_code atomic_code = TREE_CODE (*expr_p);
  tree lhs_addr = TREE_OPERAND (*expr_p, 0);
  tree rhs_expr = (atomic_code == OMP_ATOMIC_READ
		   ? NULL : TREE_OPERAND (*expr_p, 1));
  tree elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs_addr)));
  gomp_atomic_load *load;
  gomp_atomic_store *store;
  tree loaded = create_tmp_reg (elt_type);

  /* Replace occurrences of the lhs inside the rhs with the temporary
     that will hold the atomically loaded value.  */
  if (rhs_expr
      && goa_stabilize_expr (&rhs_expr, pre_p, lhs_addr, loaded) < 0)
    return GS_ERROR;
  if (gimplify_expr (&lhs_addr, pre_p, NULL, is_gimple_val, fb_rvalue)
      != GS_ALL_DONE)
    return GS_ERROR;

  /* Emit the atomic load before gimplifying the rhs, which may refer
     to the loaded value.  */
  load = gimple_build_omp_atomic_load (loaded, lhs_addr);
  gimplify_seq_add_stmt (pre_p, load);
  if (rhs_expr
      && gimplify_expr (&rhs_expr, pre_p, NULL, is_gimple_val, fb_rvalue)
	 != GS_ALL_DONE)
    return GS_ERROR;

  /* A plain atomic read stores back the loaded value unchanged.  */
  if (atomic_code == OMP_ATOMIC_READ)
    rhs_expr = loaded;
  store = gimple_build_omp_atomic_store (rhs_expr);
  gimplify_seq_add_stmt (pre_p, store);
  if (OMP_ATOMIC_SEQ_CST (*expr_p))
    {
      gimple_omp_atomic_set_seq_cst (load);
      gimple_omp_atomic_set_seq_cst (store);
    }

  /* Decide what value, if any, the whole expression produces.  */
  if (atomic_code == OMP_ATOMIC_READ
      || atomic_code == OMP_ATOMIC_CAPTURE_OLD)
    {
      *expr_p = loaded;
      gimple_omp_atomic_set_need_value (load);
    }
  else if (atomic_code == OMP_ATOMIC_CAPTURE_NEW)
    {
      *expr_p = rhs_expr;
      gimple_omp_atomic_set_need_value (store);
    }
  else
    *expr_p = NULL;
  return GS_ALL_DONE;
}
/* Gimplify a TRANSACTION_EXPR. This involves gimplification of the
body, and adding some EH bits. */
static enum gimplify_status
gimplify_transaction (tree *expr_p, gimple_seq *pre_p)
{
  tree txn = *expr_p;
  tree txn_body = TRANSACTION_EXPR_BODY (txn);
  gimple_seq seq = NULL;

  /* Wrap the transaction body in a BIND_EXPR so we have a context
     where to put decls for OMP.  */
  if (TREE_CODE (txn_body) != BIND_EXPR)
    {
      tree wrapper = build3 (BIND_EXPR, void_type_node, NULL,
			     txn_body, NULL);
      TREE_SIDE_EFFECTS (wrapper) = 1;
      SET_EXPR_LOCATION (wrapper, EXPR_LOCATION (txn_body));
      TRANSACTION_EXPR_BODY (txn) = wrapper;
    }

  push_gimplify_context ();
  tree temp = voidify_wrapper_expr (*expr_p, NULL);
  gimple *first = gimplify_and_return_first (TRANSACTION_EXPR_BODY (txn),
					     &seq);
  pop_gimplify_context (first);

  /* Build the GIMPLE_TRANSACTION, carrying over the outer/relaxed
     flags from the tree node.  */
  gtransaction *txn_stmt = gimple_build_transaction (seq);
  int subcode = 0;
  if (TRANSACTION_EXPR_OUTER (txn))
    subcode = GTMA_IS_OUTER;
  else if (TRANSACTION_EXPR_RELAXED (txn))
    subcode = GTMA_IS_RELAXED;
  gimple_transaction_set_subcode (txn_stmt, subcode);
  gimplify_seq_add_stmt (pre_p, txn_stmt);

  /* If voidification produced a result temporary, the caller still has
     that expression to gimplify.  */
  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }
  *expr_p = NULL_TREE;
  return GS_ALL_DONE;
}
/* Gimplify an OMP_ORDERED construct. EXPR is the tree version. BODY
is the OMP_BODY of the original EXPR (which has already been
gimplified so it's not present in the EXPR).
Return the gimplified GIMPLE_OMP_ORDERED tuple. */
static gimple *
gimplify_omp_ordered (tree expr, gimple_seq body)
{
  tree c, decls;
  int failures = 0;
  unsigned int i;
  tree source_c = NULL_TREE;
  tree sink_c = NULL_TREE;
  if (gimplify_omp_ctxp)
    {
      for (c = OMP_ORDERED_CLAUSES (expr); c; c = OMP_CLAUSE_CHAIN (c))
	/* depend(sink:)/depend(source) require an enclosing loop with
	   recorded iteration variables.  */
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	    && gimplify_omp_ctxp->loop_iter_var.is_empty ()
	    && (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK
		|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE))
	  {
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%<ordered%> construct with %<depend%> clause must be "
		      "closely nested inside a loop with %<ordered%> clause "
		      "with a parameter");
	    failures++;
	  }
	else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
		 && OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
	  {
	    bool fail = false;
	    /* loop_iter_var holds pairs per loop level, indexed 2*I and
	       2*I+1: verify entry I of the sink vector names iterator
	       loop_iter_var[2*I], then substitute loop_iter_var[2*I+1].  */
	    for (decls = OMP_CLAUSE_DECL (c), i = 0;
		 decls && TREE_CODE (decls) == TREE_LIST;
		 decls = TREE_CHAIN (decls), ++i)
	      if (i >= gimplify_omp_ctxp->loop_iter_var.length () / 2)
		continue;
	      else if (TREE_VALUE (decls)
		       != gimplify_omp_ctxp->loop_iter_var[2 * i])
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "variable %qE is not an iteration "
			    "of outermost loop %d, expected %qE",
			    TREE_VALUE (decls), i + 1,
			    gimplify_omp_ctxp->loop_iter_var[2 * i]);
		  fail = true;
		  failures++;
		}
	      else
		TREE_VALUE (decls)
		  = gimplify_omp_ctxp->loop_iter_var[2 * i + 1];
	    /* After the walk, I is the number of sink entries seen; it
	       must equal the number of iteration variables.  */
	    if (!fail && i != gimplify_omp_ctxp->loop_iter_var.length () / 2)
	      {
		error_at (OMP_CLAUSE_LOCATION (c),
			  "number of variables in %<depend(sink)%> "
			  "clause does not match number of "
			  "iteration variables");
		failures++;
	      }
	    sink_c = c;
	  }
	else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
		 && OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
	  {
	    /* Only a single depend(source) clause is allowed.  */
	    if (source_c)
	      {
		error_at (OMP_CLAUSE_LOCATION (c),
			  "more than one %<depend(source)%> clause on an "
			  "%<ordered%> construct");
		failures++;
	      }
	    else
	      source_c = c;
	  }
    }
  /* source and sink clauses are mutually exclusive on one construct.  */
  if (source_c && sink_c)
    {
      error_at (OMP_CLAUSE_LOCATION (source_c),
		"%<depend(source)%> clause specified together with "
		"%<depend(sink:)%> clauses on the same construct");
      failures++;
    }
  /* On any diagnosed failure emit a no-op instead of the construct.  */
  if (failures)
    return gimple_build_nop ();
  return gimple_build_omp_ordered (body, OMP_ORDERED_CLAUSES (expr));
}
/* Convert the GENERIC expression tree *EXPR_P to GIMPLE. If the
expression produces a value to be used as an operand inside a GIMPLE
statement, the value will be stored back in *EXPR_P. This value will
be a tree of class tcc_declaration, tcc_constant, tcc_reference or
an SSA_NAME. The corresponding sequence of GIMPLE statements is
emitted in PRE_P and POST_P.
Additionally, this process may overwrite parts of the input
expression during gimplification. Ideally, it should be
possible to do non-destructive gimplification.
EXPR_P points to the GENERIC expression to convert to GIMPLE. If
the expression needs to evaluate to a value to be used as
an operand in a GIMPLE statement, this value will be stored in
*EXPR_P on exit. This happens when the caller specifies one
of fb_lvalue or fb_rvalue fallback flags.
PRE_P will contain the sequence of GIMPLE statements corresponding
to the evaluation of EXPR and all the side-effects that must
be executed before the main expression. On exit, the last
statement of PRE_P is the core statement being gimplified. For
instance, when gimplifying 'if (++a)' the last statement in
PRE_P will be 'if (t.1)' where t.1 is the result of
pre-incrementing 'a'.
POST_P will contain the sequence of GIMPLE statements corresponding
to the evaluation of all the side-effects that must be executed
after the main expression. If this is NULL, the post
side-effects are stored at the end of PRE_P.
The reason why the output is split in two is to handle post
side-effects explicitly. In some cases, an expression may have
inner and outer post side-effects which need to be emitted in
an order different from the one given by the recursive
traversal. For instance, for the expression (*p--)++ the post
side-effects of '--' must actually occur *after* the post
side-effects of '++'. However, gimplification will first visit
the inner expression, so if a separate POST sequence was not
used, the resulting sequence would be:
1 t.1 = *p
2 p = p - 1
3 t.2 = t.1 + 1
4 *p = t.2
However, the post-decrement operation in line #2 must not be
evaluated until after the store to *p at line #4, so the
correct sequence should be:
1 t.1 = *p
2 t.2 = t.1 + 1
3 *p = t.2
4 p = p - 1
So, by specifying a separate post queue, it is possible
to emit the post side-effects in the correct order.
If POST_P is NULL, an internal queue will be used. Before
returning to the caller, the sequence POST_P is appended to
the main output sequence PRE_P.
GIMPLE_TEST_F points to a function that takes a tree T and
returns nonzero if T is in the GIMPLE form requested by the
caller. The GIMPLE predicates are in gimple.c.
FALLBACK tells the function what sort of a temporary we want if
gimplification cannot produce an expression that complies with
GIMPLE_TEST_F.
fb_none means that no temporary should be generated
fb_rvalue means that an rvalue is OK to generate
fb_lvalue means that an lvalue is OK to generate
fb_either means that either is OK, but an lvalue is preferable.
fb_mayfail means that gimplification may fail (in which case
GS_ERROR will be returned)
The return value is either GS_ERROR or GS_ALL_DONE, since this
function iterates until EXPR is completely gimplified or an error
occurs. */
/* Gimplify the expression *EXPR_P, emitting any needed side-effect
statements into PRE_P and POST_P; see the block comment above for the
full contract (GIMPLE_TEST_F, FALLBACK and the return value). */
enum gimplify_status
gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
bool (*gimple_test_f) (tree), fallback_t fallback)
{
tree tmp;
gimple_seq internal_pre = NULL;
gimple_seq internal_post = NULL;
tree save_expr;
bool is_statement;
location_t saved_location;
enum gimplify_status ret;
gimple_stmt_iterator pre_last_gsi, post_last_gsi;
tree label;
save_expr = *expr_p;
if (save_expr == NULL_TREE)
return GS_ALL_DONE;
/* If we are gimplifying a top-level statement, PRE_P must be valid. */
is_statement = gimple_test_f == is_gimple_stmt;
if (is_statement)
gcc_assert (pre_p);
/* Consistency checks. */
if (gimple_test_f == is_gimple_reg)
gcc_assert (fallback & (fb_rvalue | fb_lvalue));
else if (gimple_test_f == is_gimple_val
|| gimple_test_f == is_gimple_call_addr
|| gimple_test_f == is_gimple_condexpr
|| gimple_test_f == is_gimple_mem_rhs
|| gimple_test_f == is_gimple_mem_rhs_or_call
|| gimple_test_f == is_gimple_reg_rhs
|| gimple_test_f == is_gimple_reg_rhs_or_call
|| gimple_test_f == is_gimple_asm_val
|| gimple_test_f == is_gimple_mem_ref_addr)
gcc_assert (fallback & fb_rvalue);
else if (gimple_test_f == is_gimple_min_lval
|| gimple_test_f == is_gimple_lvalue)
gcc_assert (fallback & fb_lvalue);
else if (gimple_test_f == is_gimple_addressable)
gcc_assert (fallback & fb_either);
else if (gimple_test_f == is_gimple_stmt)
gcc_assert (fallback == fb_none);
else
{
/* We should have recognized the GIMPLE_TEST_F predicate to
know what kind of fallback to use in case a temporary is
needed to hold the value or address of *EXPR_P. */
gcc_unreachable ();
}
/* We used to check the predicate here and return immediately if it
succeeds. This is wrong; the design is for gimplification to be
idempotent, and for the predicates to only test for valid forms, not
whether they are fully simplified. */
if (pre_p == NULL)
pre_p = &internal_pre;
if (post_p == NULL)
post_p = &internal_post;
/* Remember the last statements added to PRE_P and POST_P. Every
new statement added by the gimplification helpers needs to be
annotated with location information. To centralize the
responsibility, we remember the last statement that had been
added to both queues before gimplifying *EXPR_P. If
gimplification produces new statements in PRE_P and POST_P, those
statements will be annotated with the same location information
as *EXPR_P. */
pre_last_gsi = gsi_last (*pre_p);
post_last_gsi = gsi_last (*post_p);
saved_location = input_location;
if (save_expr != error_mark_node
&& EXPR_HAS_LOCATION (*expr_p))
input_location = EXPR_LOCATION (*expr_p);
/* Loop over the specific gimplifiers until the toplevel node
remains the same. */
do
{
/* Strip away as many useless type conversions as possible
at the toplevel. */
STRIP_USELESS_TYPE_CONVERSION (*expr_p);
/* Remember the expr. */
save_expr = *expr_p;
/* Die, die, die, my darling. */
if (save_expr == error_mark_node
|| (TREE_TYPE (save_expr)
&& TREE_TYPE (save_expr) == error_mark_node))
{
ret = GS_ERROR;
break;
}
/* Do any language-specific gimplification. */
ret = ((enum gimplify_status)
lang_hooks.gimplify_expr (expr_p, pre_p, post_p));
if (ret == GS_OK)
{
if (*expr_p == NULL_TREE)
break;
if (*expr_p != save_expr)
continue;
}
else if (ret != GS_UNHANDLED)
break;
/* Make sure that all the cases set 'ret' appropriately. */
ret = GS_UNHANDLED;
switch (TREE_CODE (*expr_p))
{
/* First deal with the special cases. */
case POSTINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
ret = gimplify_self_mod_expr (expr_p, pre_p, post_p,
fallback != fb_none,
TREE_TYPE (*expr_p));
break;
case VIEW_CONVERT_EXPR:
if (is_gimple_reg_type (TREE_TYPE (*expr_p))
&& is_gimple_reg_type (TREE_TYPE (TREE_OPERAND (*expr_p, 0))))
{
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
}
/* Fallthru. */
case ARRAY_REF:
case ARRAY_RANGE_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case COMPONENT_REF:
ret = gimplify_compound_lval (expr_p, pre_p, post_p,
fallback ? fallback : fb_rvalue);
break;
case COND_EXPR:
ret = gimplify_cond_expr (expr_p, pre_p, fallback);
/* C99 code may assign to an array in a structure value of a
conditional expression, and this has undefined behavior
only on execution, so create a temporary if an lvalue is
required. */
if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p, false);
mark_addressable (*expr_p);
ret = GS_OK;
}
break;
case CALL_EXPR:
ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none);
/* C99 code may assign to an array in a structure returned
from a function, and this has undefined behavior only on
execution, so create a temporary if an lvalue is
required. */
if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p, false);
mark_addressable (*expr_p);
ret = GS_OK;
}
break;
case TREE_LIST:
gcc_unreachable ();
case COMPOUND_EXPR:
ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none);
break;
case COMPOUND_LITERAL_EXPR:
ret = gimplify_compound_literal_expr (expr_p, pre_p,
gimple_test_f, fallback);
break;
case MODIFY_EXPR:
case INIT_EXPR:
ret = gimplify_modify_expr (expr_p, pre_p, post_p,
fallback != fb_none);
break;
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
{
/* Preserve the original type of the expression and the
source location of the outer expression. */
tree org_type = TREE_TYPE (*expr_p);
*expr_p = gimple_boolify (*expr_p);
*expr_p = build3_loc (input_location, COND_EXPR,
org_type, *expr_p,
fold_convert_loc
(input_location,
org_type, boolean_true_node),
fold_convert_loc
(input_location,
org_type, boolean_false_node));
ret = GS_OK;
break;
}
case TRUTH_NOT_EXPR:
{
tree type = TREE_TYPE (*expr_p);
/* The parsers are careful to generate TRUTH_NOT_EXPR
only with operands that are always zero or one.
We do not fold here but handle the only interesting case
manually, as fold may re-introduce the TRUTH_NOT_EXPR. */
*expr_p = gimple_boolify (*expr_p);
if (TYPE_PRECISION (TREE_TYPE (*expr_p)) == 1)
*expr_p = build1_loc (input_location, BIT_NOT_EXPR,
TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0));
else
*expr_p = build2_loc (input_location, BIT_XOR_EXPR,
TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0),
build_int_cst (TREE_TYPE (*expr_p), 1));
if (!useless_type_conversion_p (type, TREE_TYPE (*expr_p)))
*expr_p = fold_convert_loc (input_location, type, *expr_p);
ret = GS_OK;
break;
}
case ADDR_EXPR:
ret = gimplify_addr_expr (expr_p, pre_p, post_p);
break;
case ANNOTATE_EXPR:
{
tree cond = TREE_OPERAND (*expr_p, 0);
tree kind = TREE_OPERAND (*expr_p, 1);
tree type = TREE_TYPE (cond);
if (!INTEGRAL_TYPE_P (type))
{
*expr_p = cond;
ret = GS_OK;
break;
}
tree tmp = create_tmp_var (type);
gimplify_arg (&cond, pre_p, EXPR_LOCATION (*expr_p));
gcall *call
= gimple_build_call_internal (IFN_ANNOTATE, 2, cond, kind);
gimple_call_set_lhs (call, tmp);
gimplify_seq_add_stmt (pre_p, call);
*expr_p = tmp;
ret = GS_ALL_DONE;
break;
}
case VA_ARG_EXPR:
ret = gimplify_va_arg_expr (expr_p, pre_p, post_p);
break;
CASE_CONVERT:
if (IS_EMPTY_STMT (*expr_p))
{
ret = GS_ALL_DONE;
break;
}
if (VOID_TYPE_P (TREE_TYPE (*expr_p))
|| fallback == fb_none)
{
/* Just strip a conversion to void (or in void context) and
try again. */
*expr_p = TREE_OPERAND (*expr_p, 0);
ret = GS_OK;
break;
}
ret = gimplify_conversion (expr_p);
if (ret == GS_ERROR)
break;
if (*expr_p != save_expr)
break;
/* FALLTHRU */
case FIX_TRUNC_EXPR:
/* unary_expr: ... | '(' cast ')' val | ... */
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
case INDIRECT_REF:
{
bool volatilep = TREE_THIS_VOLATILE (*expr_p);
bool notrap = TREE_THIS_NOTRAP (*expr_p);
tree saved_ptr_type = TREE_TYPE (TREE_OPERAND (*expr_p, 0));
*expr_p = fold_indirect_ref_loc (input_location, *expr_p);
if (*expr_p != save_expr)
{
ret = GS_OK;
break;
}
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_reg, fb_rvalue);
if (ret == GS_ERROR)
break;
recalculate_side_effects (*expr_p);
*expr_p = fold_build2_loc (input_location, MEM_REF,
TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0),
build_int_cst (saved_ptr_type, 0));
TREE_THIS_VOLATILE (*expr_p) = volatilep;
TREE_THIS_NOTRAP (*expr_p) = notrap;
ret = GS_OK;
break;
}
/* We arrive here through the various re-gimplification paths. */
case MEM_REF:
/* First try re-folding the whole thing. */
tmp = fold_binary (MEM_REF, TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0),
TREE_OPERAND (*expr_p, 1));
if (tmp)
{
REF_REVERSE_STORAGE_ORDER (tmp)
= REF_REVERSE_STORAGE_ORDER (*expr_p);
*expr_p = tmp;
recalculate_side_effects (*expr_p);
ret = GS_OK;
break;
}
/* Avoid re-gimplifying the address operand if it is already
in suitable form. Re-gimplifying would mark the address
operand addressable. Always gimplify when not in SSA form
as we still may have to gimplify decls with value-exprs. */
if (!gimplify_ctxp || !gimple_in_ssa_p (cfun)
|| !is_gimple_mem_ref_addr (TREE_OPERAND (*expr_p, 0)))
{
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_mem_ref_addr, fb_rvalue);
if (ret == GS_ERROR)
break;
}
recalculate_side_effects (*expr_p);
ret = GS_ALL_DONE;
break;
/* Constants need not be gimplified. */
case INTEGER_CST:
case REAL_CST:
case FIXED_CST:
case STRING_CST:
case COMPLEX_CST:
case VECTOR_CST:
/* Drop the overflow flag on constants, we do not want
that in the GIMPLE IL. */
if (TREE_OVERFLOW_P (*expr_p))
*expr_p = drop_tree_overflow (*expr_p);
ret = GS_ALL_DONE;
break;
case CONST_DECL:
/* If we require an lvalue, such as for ADDR_EXPR, retain the
CONST_DECL node. Otherwise the decl is replaceable by its
value. */
/* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. */
if (fallback & fb_lvalue)
ret = GS_ALL_DONE;
else
{
*expr_p = DECL_INITIAL (*expr_p);
ret = GS_OK;
}
break;
case DECL_EXPR:
ret = gimplify_decl_expr (expr_p, pre_p);
break;
case BIND_EXPR:
ret = gimplify_bind_expr (expr_p, pre_p);
break;
case LOOP_EXPR:
ret = gimplify_loop_expr (expr_p, pre_p);
break;
case SWITCH_EXPR:
ret = gimplify_switch_expr (expr_p, pre_p);
break;
case EXIT_EXPR:
ret = gimplify_exit_expr (expr_p);
break;
case GOTO_EXPR:
/* If the target is not a LABEL_DECL, then it is a computed jump
and the target needs to be gimplified. */
if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL)
{
ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p,
NULL, is_gimple_val, fb_rvalue);
if (ret == GS_ERROR)
break;
}
gimplify_seq_add_stmt (pre_p,
gimple_build_goto (GOTO_DESTINATION (*expr_p)));
ret = GS_ALL_DONE;
break;
case PREDICT_EXPR:
gimplify_seq_add_stmt (pre_p,
gimple_build_predict (PREDICT_EXPR_PREDICTOR (*expr_p),
PREDICT_EXPR_OUTCOME (*expr_p)));
ret = GS_ALL_DONE;
break;
case LABEL_EXPR:
ret = gimplify_label_expr (expr_p, pre_p);
label = LABEL_EXPR_LABEL (*expr_p);
gcc_assert (decl_function_context (label) == current_function_decl);
/* If the label is used in a goto statement, or address of the label
is taken, we need to unpoison all variables that were seen so far.
Doing so would prevent us from reporting false positives. */
if (asan_poisoned_variables
&& asan_used_labels != NULL
&& asan_used_labels->contains (label))
asan_poison_variables (asan_poisoned_variables, false, pre_p);
break;
case CASE_LABEL_EXPR:
ret = gimplify_case_label_expr (expr_p, pre_p);
if (gimplify_ctxp->live_switch_vars)
asan_poison_variables (gimplify_ctxp->live_switch_vars, false,
pre_p);
break;
case RETURN_EXPR:
ret = gimplify_return_expr (*expr_p, pre_p);
break;
case CONSTRUCTOR:
/* Don't reduce this in place; let gimplify_init_constructor work its
magic. But if we're just elaborating this for side effects, just
gimplify any element that has side-effects. */
if (fallback == fb_none)
{
unsigned HOST_WIDE_INT ix;
tree val;
tree temp = NULL_TREE;
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (*expr_p), ix, val)
if (TREE_SIDE_EFFECTS (val))
append_to_statement_list (val, &temp);
*expr_p = temp;
ret = temp ? GS_OK : GS_ALL_DONE;
}
/* C99 code may assign to an array in a constructed
structure or union, and this has undefined behavior only
on execution, so create a temporary if an lvalue is
required. */
else if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p, false);
mark_addressable (*expr_p);
ret = GS_OK;
}
else
ret = GS_ALL_DONE;
break;
/* The following are special cases that are not handled by the
original GIMPLE grammar. */
/* SAVE_EXPR nodes are converted into a GIMPLE identifier and
eliminated. */
case SAVE_EXPR:
ret = gimplify_save_expr (expr_p, pre_p, post_p);
break;
case BIT_FIELD_REF:
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_lvalue, fb_either);
recalculate_side_effects (*expr_p);
break;
case TARGET_MEM_REF:
{
enum gimplify_status r0 = GS_ALL_DONE, r1 = GS_ALL_DONE;
if (TMR_BASE (*expr_p))
r0 = gimplify_expr (&TMR_BASE (*expr_p), pre_p,
post_p, is_gimple_mem_ref_addr, fb_either);
if (TMR_INDEX (*expr_p))
r1 = gimplify_expr (&TMR_INDEX (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
if (TMR_INDEX2 (*expr_p))
r1 = gimplify_expr (&TMR_INDEX2 (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
/* TMR_STEP and TMR_OFFSET are always integer constants. */
ret = MIN (r0, r1);
}
break;
case NON_LVALUE_EXPR:
/* This should have been stripped above. */
gcc_unreachable ();
case ASM_EXPR:
ret = gimplify_asm_expr (expr_p, pre_p, post_p);
break;
case TRY_FINALLY_EXPR:
case TRY_CATCH_EXPR:
{
gimple_seq eval, cleanup;
gtry *try_;
/* Calls to destructors are generated automatically in FINALLY/CATCH
block. They should have location as UNKNOWN_LOCATION. However,
gimplify_call_expr will reset these call stmts to input_location
if it finds stmt's location is unknown. To prevent resetting for
destructors, we set the input_location to unknown.
Note that this only affects the destructor calls in FINALLY/CATCH
block, and will automatically reset to its original value by the
end of gimplify_expr. */
input_location = UNKNOWN_LOCATION;
eval = cleanup = NULL;
gimplify_and_add (TREE_OPERAND (*expr_p, 0), &eval);
gimplify_and_add (TREE_OPERAND (*expr_p, 1), &cleanup);
/* Don't create bogus GIMPLE_TRY with empty cleanup. */
if (gimple_seq_empty_p (cleanup))
{
gimple_seq_add_seq (pre_p, eval);
ret = GS_ALL_DONE;
break;
}
try_ = gimple_build_try (eval, cleanup,
TREE_CODE (*expr_p) == TRY_FINALLY_EXPR
? GIMPLE_TRY_FINALLY
: GIMPLE_TRY_CATCH);
if (EXPR_HAS_LOCATION (save_expr))
gimple_set_location (try_, EXPR_LOCATION (save_expr));
else if (LOCATION_LOCUS (saved_location) != UNKNOWN_LOCATION)
gimple_set_location (try_, saved_location);
if (TREE_CODE (*expr_p) == TRY_CATCH_EXPR)
gimple_try_set_catch_is_cleanup (try_,
TRY_CATCH_IS_CLEANUP (*expr_p));
gimplify_seq_add_stmt (pre_p, try_);
ret = GS_ALL_DONE;
break;
}
case CLEANUP_POINT_EXPR:
ret = gimplify_cleanup_point_expr (expr_p, pre_p);
break;
case TARGET_EXPR:
ret = gimplify_target_expr (expr_p, pre_p, post_p);
break;
case CATCH_EXPR:
{
gimple *c;
gimple_seq handler = NULL;
gimplify_and_add (CATCH_BODY (*expr_p), &handler);
c = gimple_build_catch (CATCH_TYPES (*expr_p), handler);
gimplify_seq_add_stmt (pre_p, c);
ret = GS_ALL_DONE;
break;
}
case EH_FILTER_EXPR:
{
gimple *ehf;
gimple_seq failure = NULL;
gimplify_and_add (EH_FILTER_FAILURE (*expr_p), &failure);
ehf = gimple_build_eh_filter (EH_FILTER_TYPES (*expr_p), failure);
gimple_set_no_warning (ehf, TREE_NO_WARNING (*expr_p));
gimplify_seq_add_stmt (pre_p, ehf);
ret = GS_ALL_DONE;
break;
}
case OBJ_TYPE_REF:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
TREE_SIDE_EFFECTS (*expr_p) = 0;
ret = MIN (r0, r1);
}
break;
case LABEL_DECL:
/* We get here when taking the address of a label. We mark
the label as "forced"; meaning it can never be removed and
it is a potential target for any computed goto. */
FORCED_LABEL (*expr_p) = 1;
ret = GS_ALL_DONE;
break;
case STATEMENT_LIST:
ret = gimplify_statement_list (expr_p, pre_p);
break;
case WITH_SIZE_EXPR:
{
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p == &internal_post ? NULL : post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
is_gimple_val, fb_rvalue);
ret = GS_ALL_DONE;
}
break;
case VAR_DECL:
case PARM_DECL:
ret = gimplify_var_or_parm_decl (expr_p);
break;
case RESULT_DECL:
/* When within an OMP context, notice uses of variables. */
if (gimplify_omp_ctxp)
omp_notice_variable (gimplify_omp_ctxp, *expr_p, true);
ret = GS_ALL_DONE;
break;
case SSA_NAME:
/* Allow callbacks into the gimplifier during optimization. */
ret = GS_ALL_DONE;
break;
case OMP_PARALLEL:
gimplify_omp_parallel (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_TASK:
gimplify_omp_task (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_FOR:
case OMP_SIMD:
case CILK_SIMD:
case CILK_FOR:
case OMP_DISTRIBUTE:
case OMP_TASKLOOP:
case OACC_LOOP:
ret = gimplify_omp_for (expr_p, pre_p);
break;
case OACC_CACHE:
gimplify_oacc_cache (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OACC_DECLARE:
gimplify_oacc_declare (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OACC_HOST_DATA:
case OACC_DATA:
case OACC_KERNELS:
case OACC_PARALLEL:
case OMP_SECTIONS:
case OMP_SINGLE:
case OMP_TARGET:
case OMP_TARGET_DATA:
case OMP_TEAMS:
gimplify_omp_workshare (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OACC_ENTER_DATA:
case OACC_EXIT_DATA:
case OACC_UPDATE:
case OMP_TARGET_UPDATE:
case OMP_TARGET_ENTER_DATA:
case OMP_TARGET_EXIT_DATA:
gimplify_omp_target_update (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_SECTION:
case OMP_MASTER:
case OMP_TASKGROUP:
case OMP_ORDERED:
case OMP_CRITICAL:
{
gimple_seq body = NULL;
gimple *g;
gimplify_and_add (OMP_BODY (*expr_p), &body);
switch (TREE_CODE (*expr_p))
{
case OMP_SECTION:
g = gimple_build_omp_section (body);
break;
case OMP_MASTER:
g = gimple_build_omp_master (body);
break;
case OMP_TASKGROUP:
{
gimple_seq cleanup = NULL;
tree fn
= builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_END);
g = gimple_build_call (fn, 0);
gimple_seq_add_stmt (&cleanup, g);
g = gimple_build_try (body, cleanup, GIMPLE_TRY_FINALLY);
body = NULL;
gimple_seq_add_stmt (&body, g);
g = gimple_build_omp_taskgroup (body);
}
break;
case OMP_ORDERED:
g = gimplify_omp_ordered (*expr_p, body);
break;
case OMP_CRITICAL:
gimplify_scan_omp_clauses (&OMP_CRITICAL_CLAUSES (*expr_p),
pre_p, ORT_WORKSHARE, OMP_CRITICAL);
gimplify_adjust_omp_clauses (pre_p, body,
&OMP_CRITICAL_CLAUSES (*expr_p),
OMP_CRITICAL);
g = gimple_build_omp_critical (body,
OMP_CRITICAL_NAME (*expr_p),
OMP_CRITICAL_CLAUSES (*expr_p));
break;
default:
gcc_unreachable ();
}
gimplify_seq_add_stmt (pre_p, g);
ret = GS_ALL_DONE;
break;
}
case OMP_ATOMIC:
case OMP_ATOMIC_READ:
case OMP_ATOMIC_CAPTURE_OLD:
case OMP_ATOMIC_CAPTURE_NEW:
ret = gimplify_omp_atomic (expr_p, pre_p);
break;
case TRANSACTION_EXPR:
ret = gimplify_transaction (expr_p, pre_p);
break;
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
{
tree orig_type = TREE_TYPE (*expr_p);
tree new_type, xop0, xop1;
*expr_p = gimple_boolify (*expr_p);
new_type = TREE_TYPE (*expr_p);
if (!useless_type_conversion_p (orig_type, new_type))
{
*expr_p = fold_convert_loc (input_location, orig_type, *expr_p);
ret = GS_OK;
break;
}
/* Boolified binary truth expressions are semantically equivalent
to bitwise binary expressions. Canonicalize them to the
bitwise variant. */
switch (TREE_CODE (*expr_p))
{
case TRUTH_AND_EXPR:
TREE_SET_CODE (*expr_p, BIT_AND_EXPR);
break;
case TRUTH_OR_EXPR:
TREE_SET_CODE (*expr_p, BIT_IOR_EXPR);
break;
case TRUTH_XOR_EXPR:
TREE_SET_CODE (*expr_p, BIT_XOR_EXPR);
break;
default:
break;
}
/* Now make sure that operands have compatible type to
expression's new_type. */
xop0 = TREE_OPERAND (*expr_p, 0);
xop1 = TREE_OPERAND (*expr_p, 1);
if (!useless_type_conversion_p (new_type, TREE_TYPE (xop0)))
TREE_OPERAND (*expr_p, 0) = fold_convert_loc (input_location,
new_type,
xop0);
if (!useless_type_conversion_p (new_type, TREE_TYPE (xop1)))
TREE_OPERAND (*expr_p, 1) = fold_convert_loc (input_location,
new_type,
xop1);
/* Continue classified as tcc_binary. */
goto expr_2;
}
case VEC_COND_EXPR:
{
enum gimplify_status r0, r1, r2;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_condexpr, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p,
post_p, is_gimple_val, fb_rvalue);
ret = MIN (MIN (r0, r1), r2);
recalculate_side_effects (*expr_p);
}
break;
case FMA_EXPR:
case VEC_PERM_EXPR:
/* Classified as tcc_expression. */
goto expr_3;
case BIT_INSERT_EXPR:
/* Argument 3 is a constant. */
goto expr_2;
case POINTER_PLUS_EXPR:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
ret = MIN (r0, r1);
break;
}
case CILK_SYNC_STMT:
{
if (!fn_contains_cilk_spawn_p (cfun))
{
error_at (EXPR_LOCATION (*expr_p),
"expected %<_Cilk_spawn%> before %<_Cilk_sync%>");
ret = GS_ERROR;
}
else
{
gimplify_cilk_sync (expr_p, pre_p);
ret = GS_ALL_DONE;
}
break;
}
default:
switch (TREE_CODE_CLASS (TREE_CODE (*expr_p)))
{
case tcc_comparison:
/* Handle comparison of objects of non scalar mode aggregates
with a call to memcmp. It would be nice to only have to do
this for variable-sized objects, but then we'd have to allow
the same nest of reference nodes we allow for MODIFY_EXPR and
that's too complex.
Compare scalar mode aggregates as scalar mode values. Using
memcmp for them would be very inefficient at best, and is
plain wrong if bitfields are involved. */
{
tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1));
/* Vector comparisons need no boolification. */
if (TREE_CODE (type) == VECTOR_TYPE)
goto expr_2;
else if (!AGGREGATE_TYPE_P (type))
{
tree org_type = TREE_TYPE (*expr_p);
*expr_p = gimple_boolify (*expr_p);
if (!useless_type_conversion_p (org_type,
TREE_TYPE (*expr_p)))
{
*expr_p = fold_convert_loc (input_location,
org_type, *expr_p);
ret = GS_OK;
}
else
goto expr_2;
}
else if (TYPE_MODE (type) != BLKmode)
ret = gimplify_scalar_mode_aggregate_compare (expr_p);
else
ret = gimplify_variable_sized_compare (expr_p);
break;
}
/* If *EXPR_P does not need to be special-cased, handle it
according to its class. */
case tcc_unary:
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
break;
case tcc_binary:
expr_2:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
ret = MIN (r0, r1);
break;
}
expr_3:
{
enum gimplify_status r0, r1, r2;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p,
post_p, is_gimple_val, fb_rvalue);
ret = MIN (MIN (r0, r1), r2);
break;
}
case tcc_declaration:
case tcc_constant:
ret = GS_ALL_DONE;
goto dont_recalculate;
default:
gcc_unreachable ();
}
recalculate_side_effects (*expr_p);
dont_recalculate:
break;
}
/* A GS_OK result must leave a nonnull replacement expression for the
next iteration to work on. */
gcc_assert (*expr_p || ret != GS_OK);
}
while (ret == GS_OK);
/* If we encountered an error_mark somewhere nested inside, either
stub out the statement or propagate the error back out. */
if (ret == GS_ERROR)
{
if (is_statement)
*expr_p = NULL;
goto out;
}
/* This was only valid as a return value from the langhook, which
we handled. Make sure it doesn't escape from any other context. */
gcc_assert (ret != GS_UNHANDLED);
if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p))
{
/* We aren't looking for a value, and we don't have a valid
statement. If it doesn't have side-effects, throw it away.
We can also get here with code such as "*&&L;", where L is
a LABEL_DECL that is marked as FORCED_LABEL. */
if (TREE_CODE (*expr_p) == LABEL_DECL
|| !TREE_SIDE_EFFECTS (*expr_p))
*expr_p = NULL;
else if (!TREE_THIS_VOLATILE (*expr_p))
{
/* This is probably a _REF that contains something nested that
has side effects. Recurse through the operands to find it. */
enum tree_code code = TREE_CODE (*expr_p);
switch (code)
{
case COMPONENT_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case VIEW_CONVERT_EXPR:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
gimple_test_f, fallback);
break;
default:
/* Anything else with side-effects must be converted to
a valid statement before we get here. */
gcc_unreachable ();
}
*expr_p = NULL;
}
else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p))
&& TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode)
{
/* Historically, the compiler has treated a bare reference
to a non-BLKmode volatile lvalue as forcing a load. */
tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p));
/* Normally, we do not want to create a temporary for a
TREE_ADDRESSABLE type because such a type should not be
copied by bitwise-assignment. However, we make an
exception here, as all we are doing here is ensuring that
we read the bytes that make up the type. We use
create_tmp_var_raw because create_tmp_var will abort when
given a TREE_ADDRESSABLE type. */
tree tmp = create_tmp_var_raw (type, "vol");
gimple_add_tmp_var (tmp);
gimplify_assign (tmp, *expr_p, pre_p);
*expr_p = NULL;
}
else
/* We can't do anything useful with a volatile reference to
an incomplete type, so just throw it away. Likewise for
a BLKmode type, since any implicit inner load should
already have been turned into an explicit one by the
gimplification process. */
*expr_p = NULL;
}
/* If we are gimplifying at the statement level, we're done. Tack
everything together and return. */
if (fallback == fb_none || is_statement)
{
/* Since *EXPR_P has been converted into a GIMPLE tuple, clear
it out for GC to reclaim it. */
*expr_p = NULL_TREE;
if (!gimple_seq_empty_p (internal_pre)
|| !gimple_seq_empty_p (internal_post))
{
gimplify_seq_add_seq (&internal_pre, internal_post);
gimplify_seq_add_seq (pre_p, internal_pre);
}
/* The result of gimplifying *EXPR_P is going to be the last few
statements in *PRE_P and *POST_P. Add location information
to all the statements that were added by the gimplification
helpers. */
if (!gimple_seq_empty_p (*pre_p))
annotate_all_with_location_after (*pre_p, pre_last_gsi, input_location);
if (!gimple_seq_empty_p (*post_p))
annotate_all_with_location_after (*post_p, post_last_gsi,
input_location);
goto out;
}
#ifdef ENABLE_GIMPLE_CHECKING
if (*expr_p)
{
enum tree_code code = TREE_CODE (*expr_p);
/* These expressions should already be in gimple IR form. */
gcc_assert (code != MODIFY_EXPR
&& code != ASM_EXPR
&& code != BIND_EXPR
&& code != CATCH_EXPR
&& (code != COND_EXPR || gimplify_ctxp->allow_rhs_cond_expr)
&& code != EH_FILTER_EXPR
&& code != GOTO_EXPR
&& code != LABEL_EXPR
&& code != LOOP_EXPR
&& code != SWITCH_EXPR
&& code != TRY_FINALLY_EXPR
&& code != OACC_PARALLEL
&& code != OACC_KERNELS
&& code != OACC_DATA
&& code != OACC_HOST_DATA
&& code != OACC_DECLARE
&& code != OACC_UPDATE
&& code != OACC_ENTER_DATA
&& code != OACC_EXIT_DATA
&& code != OACC_CACHE
&& code != OMP_CRITICAL
&& code != OMP_FOR
&& code != OACC_LOOP
&& code != OMP_MASTER
&& code != OMP_TASKGROUP
&& code != OMP_ORDERED
&& code != OMP_PARALLEL
&& code != OMP_SECTIONS
&& code != OMP_SECTION
&& code != OMP_SINGLE);
}
#endif
/* Otherwise we're gimplifying a subexpression, so the resulting
value is interesting. If it's a valid operand that matches
GIMPLE_TEST_F, we're done. Unless we are handling some
post-effects internally; if that's the case, we need to copy into
a temporary before adding the post-effects to POST_P. */
if (gimple_seq_empty_p (internal_post) && (*gimple_test_f) (*expr_p))
goto out;
/* Otherwise, we need to create a new temporary for the gimplified
expression. */
/* We can't return an lvalue if we have an internal postqueue. The
object the lvalue refers to would (probably) be modified by the
postqueue; we need to copy the value out first, which means an
rvalue. */
if ((fallback & fb_lvalue)
&& gimple_seq_empty_p (internal_post)
&& is_gimple_addressable (*expr_p))
{
/* An lvalue will do. Take the address of the expression, store it
in a temporary, and replace the expression with an INDIRECT_REF of
that temporary. */
tmp = build_fold_addr_expr_loc (input_location, *expr_p);
gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue);
*expr_p = build_simple_mem_ref (tmp);
}
else if ((fallback & fb_rvalue) && is_gimple_reg_rhs_or_call (*expr_p))
{
/* An rvalue will do. Assign the gimplified expression into a
new temporary TMP and replace the original expression with
TMP. First, make sure that the expression has a type so that
it can be assigned into a temporary. */
gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p)));
*expr_p = get_formal_tmp_var (*expr_p, pre_p);
}
else
{
#ifdef ENABLE_GIMPLE_CHECKING
if (!(fallback & fb_mayfail))
{
fprintf (stderr, "gimplification failed:\n");
print_generic_expr (stderr, *expr_p, 0);
debug_tree (*expr_p);
internal_error ("gimplification failed");
}
#endif
gcc_assert (fallback & fb_mayfail);
/* If this is an asm statement, and the user asked for the
impossible, don't die. Fail and let gimplify_asm_expr
issue an error. */
ret = GS_ERROR;
goto out;
}
/* Make sure the temporary matches our predicate. */
gcc_assert ((*gimple_test_f) (*expr_p));
if (!gimple_seq_empty_p (internal_post))
{
annotate_all_with_location (internal_post, input_location);
gimplify_seq_add_seq (pre_p, internal_post);
}
out:
/* Restore the location saved on entry in all exit paths. */
input_location = saved_location;
return ret;
}
/* Like gimplify_expr but make sure the gimplified result is not itself
   a SSA name (but a decl if it were).  Temporaries required by
   evaluating *EXPR_P may be still SSA names.  */

static enum gimplify_status
gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
bool (*gimple_test_f) (tree), fallback_t fallback,
bool allow_ssa)
{
  /* Record whether the input itself was an SSA name; in that case we
     may not simply redirect its defining statement below.  */
  bool was_ssa_name_p = TREE_CODE (*expr_p) == SSA_NAME;
  enum gimplify_status ret = gimplify_expr (expr_p, pre_p, post_p,
					    gimple_test_f, fallback);

  /* Nothing more to do unless the caller forbids SSA names and we
     produced one.  */
  if (allow_ssa || TREE_CODE (*expr_p) != SSA_NAME)
    return ret;

  tree name = *expr_p;
  if (was_ssa_name_p)
    /* The expression was an SSA name to begin with; copy its value
       into a fresh temporary.  */
    *expr_p = get_initialized_tmp_var (*expr_p, pre_p, NULL, false);
  else
    {
      /* Avoid the extra copy if possible: rewrite the statement that
	 defined the SSA name to store into a temporary register
	 instead, then release the now-unused name.  */
      *expr_p = create_tmp_reg (TREE_TYPE (name));
      gimple_set_lhs (SSA_NAME_DEF_STMT (name), *expr_p);
      release_ssa_name (name);
    }
  return ret;
}
/* Look through TYPE for variable-sized objects and gimplify each such
size that we find. Add to LIST_P any statements generated. */
void
gimplify_type_sizes (tree type, gimple_seq *list_p)
{
tree field, t;
if (type == NULL || type == error_mark_node)
return;
/* We first do the main variant, then copy into any other variants. */
type = TYPE_MAIN_VARIANT (type);
/* Avoid infinite recursion: mark the type as done BEFORE recursing
into component types that may refer back to it. */
if (TYPE_SIZES_GIMPLIFIED (type))
return;
TYPE_SIZES_GIMPLIFIED (type) = 1;
switch (TREE_CODE (type))
{
case INTEGER_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p);
gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p);
/* Propagate the gimplified bounds to every other variant. */
for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
{
TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type);
TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type);
}
break;
case ARRAY_TYPE:
/* These types may not have declarations, so handle them here. */
gimplify_type_sizes (TREE_TYPE (type), list_p);
gimplify_type_sizes (TYPE_DOMAIN (type), list_p);
/* Ensure VLA bounds aren't removed, for -O0 they should be variables
with assigned stack slots, for -O1+ -g they should be tracked
by VTA. */
if (!(TYPE_NAME (type)
&& TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
&& DECL_IGNORED_P (TYPE_NAME (type)))
&& TYPE_DOMAIN (type)
&& INTEGRAL_TYPE_P (TYPE_DOMAIN (type)))
{
t = TYPE_MIN_VALUE (TYPE_DOMAIN (type));
if (t && VAR_P (t) && DECL_ARTIFICIAL (t))
DECL_IGNORED_P (t) = 0;
t = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
if (t && VAR_P (t) && DECL_ARTIFICIAL (t))
DECL_IGNORED_P (t) = 0;
}
break;
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
/* Gimplify the offset and size of each field, then recurse into
the field's own type. */
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
if (TREE_CODE (field) == FIELD_DECL)
{
gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p);
gimplify_one_sizepos (&DECL_SIZE (field), list_p);
gimplify_one_sizepos (&DECL_SIZE_UNIT (field), list_p);
gimplify_type_sizes (TREE_TYPE (field), list_p);
}
break;
case POINTER_TYPE:
case REFERENCE_TYPE:
/* We used to recurse on the pointed-to type here, which turned out to
be incorrect because its definition might refer to variables not
yet initialized at this point if a forward declaration is involved.
It was actually useful for anonymous pointed-to types to ensure
that the sizes evaluation dominates every possible later use of the
values. Restricting to such types here would be safe since there
is no possible forward declaration around, but would introduce an
undesirable middle-end semantic to anonymity. We then defer to
front-ends the responsibility of ensuring that the sizes are
evaluated both early and late enough, e.g. by attaching artificial
type declarations to the tree. */
break;
default:
break;
}
gimplify_one_sizepos (&TYPE_SIZE (type), list_p);
gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p);
/* Share the gimplified sizes with all other variants of the type. */
for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
{
TYPE_SIZE (t) = TYPE_SIZE (type);
TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type);
TYPE_SIZES_GIMPLIFIED (t) = 1;
}
}
/* Helper of gimplify_type_sizes: make sure the size/position expression
   *EXPR_P has had every SAVE_EXPR inside it evaluated, appending whatever
   statements that requires to *STMT_P.  */

void
gimplify_one_sizepos (tree *expr_p, gimple_seq *stmt_p)
{
  /* Nothing to do when the value is absent, constant, contains a
     PLACEHOLDER_EXPR, or is already a VAR_DECL (see is_gimple_sizepos).
     If it's a VAR_DECL from another function, the gimplifier would want
     to replace it with a new variable, which would break types coming
     from outside the function, so leaving it alone is the right call.  */
  if (is_gimple_sizepos (*expr_p))
    return;

  /* Unshare before gimplifying: SSA names in decl/type fields are a bad
     idea -- they'd get reclaimed if the def vanishes.  */
  *expr_p = unshare_expr (*expr_p);

  gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue, false);
}
/* Gimplify the body of statements of FNDECL and return a GIMPLE_BIND node
   containing the sequence of corresponding GIMPLE statements.  If DO_PARMS
   is true, also gimplify the parameters.  */

gbind *
gimplify_body (tree fndecl, bool do_parms)
{
  location_t saved_location = input_location;
  gimple_seq parm_stmts, seq;
  gimple *outer_stmt;
  gbind *outer_bind;
  struct cgraph_node *cgn;

  timevar_push (TV_TREE_GIMPLIFY);

  init_tree_ssa (cfun);

  /* Initialize for optimize_insn_for_s{ize,peed}_p possibly called during
     gimplification.  */
  default_rtl_profile ();

  /* A gimplify context must not already be active; push a fresh one that
     is popped (into OUTER_BIND) at the end of this function.  */
  gcc_assert (gimplify_ctxp == NULL);
  push_gimplify_context (true);

  if (flag_openacc || flag_openmp)
    {
      gcc_assert (gimplify_omp_ctxp == NULL);
      if (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (fndecl)))
	gimplify_omp_ctxp = new_omp_context (ORT_TARGET);
    }

  /* Unshare most shared trees in the body and in that of any nested functions.
     It would seem we don't have to do this for nested functions because
     they are supposed to be output and then the outer function gimplified
     first, but the g++ front end doesn't always do it that way.  */
  unshare_body (fndecl);
  unvisit_body (fndecl);

  /* For a nested function (it has an origin), collect the non-local VLA
     sizes referenced while gimplifying, so their variables can be declared
     in OUTER_BIND below.  */
  cgn = cgraph_node::get (fndecl);
  if (cgn && cgn->origin)
    nonlocal_vlas = new hash_set<tree>;

  /* Make sure input_location isn't set to something weird.  */
  input_location = DECL_SOURCE_LOCATION (fndecl);

  /* Resolve callee-copies.  This has to be done before processing
     the body so that DECL_VALUE_EXPR gets processed correctly.  */
  parm_stmts = do_parms ? gimplify_parameters () : NULL;

  /* Gimplify the function's body.  */
  seq = NULL;
  gimplify_stmt (&DECL_SAVED_TREE (fndecl), &seq);
  outer_stmt = gimple_seq_first_stmt (seq);
  if (!outer_stmt)
    {
      /* An empty body still needs one statement to anchor OUTER_BIND.  */
      outer_stmt = gimple_build_nop ();
      gimplify_seq_add_stmt (&seq, outer_stmt);
    }

  /* The body must contain exactly one statement, a GIMPLE_BIND.  If this is
     not the case, wrap everything in a GIMPLE_BIND to make it so.  */
  if (gimple_code (outer_stmt) == GIMPLE_BIND
      && gimple_seq_first (seq) == gimple_seq_last (seq))
    outer_bind = as_a <gbind *> (outer_stmt);
  else
    outer_bind = gimple_build_bind (NULL_TREE, seq, NULL);

  /* The GENERIC body is consumed; the GIMPLE body replaces it.  */
  DECL_SAVED_TREE (fndecl) = NULL_TREE;

  /* If we had callee-copies statements, insert them at the beginning
     of the function and clear DECL_VALUE_EXPR_P on the parameters.  */
  if (!gimple_seq_empty_p (parm_stmts))
    {
      tree parm;

      gimplify_seq_add_seq (&parm_stmts, gimple_bind_body (outer_bind));
      gimple_bind_set_body (outer_bind, parm_stmts);

      for (parm = DECL_ARGUMENTS (current_function_decl);
	   parm; parm = DECL_CHAIN (parm))
	if (DECL_HAS_VALUE_EXPR_P (parm))
	  {
	    DECL_HAS_VALUE_EXPR_P (parm) = 0;
	    DECL_IGNORED_P (parm) = 0;
	  }
    }

  /* Declare the variables created for non-local VLA sizes (if any) and
     release the tracking set allocated above.  */
  if (nonlocal_vlas)
    {
      if (nonlocal_vla_vars)
	{
	  /* tree-nested.c may later on call declare_vars (..., true);
	     which relies on BLOCK_VARS chain to be the tail of the
	     gimple_bind_vars chain.  Ensure we don't violate that
	     assumption.  */
	  if (gimple_bind_block (outer_bind)
	      == DECL_INITIAL (current_function_decl))
	    declare_vars (nonlocal_vla_vars, outer_bind, true);
	  else
	    BLOCK_VARS (DECL_INITIAL (current_function_decl))
	      = chainon (BLOCK_VARS (DECL_INITIAL (current_function_decl)),
			 nonlocal_vla_vars);
	  nonlocal_vla_vars = NULL_TREE;
	}
      delete nonlocal_vlas;
      nonlocal_vlas = NULL;
    }

  if ((flag_openacc || flag_openmp || flag_openmp_simd)
      && gimplify_omp_ctxp)
    {
      delete_omp_context (gimplify_omp_ctxp);
      gimplify_omp_ctxp = NULL;
    }

  pop_gimplify_context (outer_bind);
  gcc_assert (gimplify_ctxp == NULL);

  /* Sanity-check the produced GIMPLE unless errors were already seen.  */
  if (flag_checking && !seen_error ())
    verify_gimple_in_seq (gimple_bind_body (outer_bind));

  timevar_pop (TV_TREE_GIMPLIFY);
  input_location = saved_location;

  return outer_bind;
}
typedef char *char_p; /* For DEF_VEC_P. */
/* Return whether we should exclude FNDECL from instrumentation. */
static bool
flag_instrument_functions_exclude_p (tree fndecl)
{
vec<char_p> *v;
v = (vec<char_p> *) flag_instrument_functions_exclude_functions;
if (v && v->length () > 0)
{
const char *name;
int i;
char *s;
name = lang_hooks.decl_printable_name (fndecl, 0);
FOR_EACH_VEC_ELT (*v, i, s)
if (strstr (name, s) != NULL)
return true;
}
v = (vec<char_p> *) flag_instrument_functions_exclude_files;
if (v && v->length () > 0)
{
const char *name;
int i;
char *s;
name = DECL_SOURCE_FILE (fndecl);
FOR_EACH_VEC_ELT (*v, i, s)
if (strstr (name, s) != NULL)
return true;
}
return false;
}
/* Entry point to the gimplification pass.  FNDECL is the FUNCTION_DECL
   node for the function we want to gimplify.

   Return the sequence of GIMPLE statements corresponding to the body
   of FNDECL.  */

void
gimplify_function_tree (tree fndecl)
{
  tree parm, ret;
  gimple_seq seq;
  gbind *bind;

  /* This pass must only run once per function.  */
  gcc_assert (!gimple_body (fndecl));

  if (DECL_STRUCT_FUNCTION (fndecl))
    push_cfun (DECL_STRUCT_FUNCTION (fndecl));
  else
    push_struct_function (fndecl);

  /* Tentatively set PROP_gimple_lva here, and reset it in gimplify_va_arg_expr
     if necessary.  */
  cfun->curr_properties |= PROP_gimple_lva;

  for (parm = DECL_ARGUMENTS (fndecl); parm ; parm = DECL_CHAIN (parm))
    {
      /* Preliminarily mark non-addressed complex variables as eligible
	 for promotion to gimple registers.  We'll transform their uses
	 as we find them.  */
      if ((TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE
	   || TREE_CODE (TREE_TYPE (parm)) == VECTOR_TYPE)
	  && !TREE_THIS_VOLATILE (parm)
	  && !needs_to_live_in_memory (parm))
	DECL_GIMPLE_REG_P (parm) = 1;
    }

  /* The result decl gets the same register-promotion treatment as the
     parameters above.  */
  ret = DECL_RESULT (fndecl);
  if ((TREE_CODE (TREE_TYPE (ret)) == COMPLEX_TYPE
       || TREE_CODE (TREE_TYPE (ret)) == VECTOR_TYPE)
      && !needs_to_live_in_memory (ret))
    DECL_GIMPLE_REG_P (ret) = 1;

  /* Track variables poisoned for use-after-scope sanitization only while
     gimplifying this body.  */
  if (asan_sanitize_use_after_scope () && !asan_no_sanitize_address_p ())
    asan_poisoned_variables = new hash_set<tree> ();

  bind = gimplify_body (fndecl, true);

  if (asan_poisoned_variables)
    {
      delete asan_poisoned_variables;
      asan_poisoned_variables = NULL;
    }

  /* The tree body of the function is no longer needed, replace it
     with the new GIMPLE body.  */
  seq = NULL;
  gimple_seq_add_stmt (&seq, bind);
  gimple_set_body (fndecl, seq);

  /* If we're instrumenting function entry/exit, then prepend the call to
     the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to
     catch the exit hook.  */
  /* ??? Add some way to ignore exceptions for this TFE.  */
  if (flag_instrument_function_entry_exit
      && !DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl)
      /* Do not instrument extern inline functions.  */
      && !(DECL_DECLARED_INLINE_P (fndecl)
	   && DECL_EXTERNAL (fndecl)
	   && DECL_DISREGARD_INLINE_LIMITS (fndecl))
      && !flag_instrument_functions_exclude_p (fndecl))
    {
      tree x;
      gbind *new_bind;
      gimple *tf;
      gimple_seq cleanup = NULL, body = NULL;
      tree tmp_var;
      gcall *call;

      /* Build the exit hook (runs in the try/finally cleanup): capture
	 __builtin_return_address (0) in a temporary and pass it, with the
	 function's address, to the profiling exit builtin.  */
      x = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS);
      call = gimple_build_call (x, 1, integer_zero_node);
      tmp_var = create_tmp_var (ptr_type_node, "return_addr");
      gimple_call_set_lhs (call, tmp_var);
      gimplify_seq_add_stmt (&cleanup, call);
      x = builtin_decl_implicit (BUILT_IN_PROFILE_FUNC_EXIT);
      call = gimple_build_call (x, 2,
				build_fold_addr_expr (current_function_decl),
				tmp_var);
      gimplify_seq_add_stmt (&cleanup, call);
      tf = gimple_build_try (seq, cleanup, GIMPLE_TRY_FINALLY);

      /* Build the entry hook the same way, then append the original body
	 (wrapped in the try/finally) after it.  */
      x = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS);
      call = gimple_build_call (x, 1, integer_zero_node);
      tmp_var = create_tmp_var (ptr_type_node, "return_addr");
      gimple_call_set_lhs (call, tmp_var);
      gimplify_seq_add_stmt (&body, call);
      x = builtin_decl_implicit (BUILT_IN_PROFILE_FUNC_ENTER);
      call = gimple_build_call (x, 2,
				build_fold_addr_expr (current_function_decl),
				tmp_var);
      gimplify_seq_add_stmt (&body, call);
      gimplify_seq_add_stmt (&body, tf);
      new_bind = gimple_build_bind (NULL, body, NULL);

      /* Replace the current function body with the body
	 wrapped in the try/finally TF.  */
      seq = NULL;
      gimple_seq_add_stmt (&seq, new_bind);
      gimple_set_body (fndecl, seq);

      /* Replace the GIMPLE_BIND for the whole function.  */
      bind = new_bind;
    }

  /* For TSan, wrap the whole body so IFN_TSAN_FUNC_EXIT runs on every
     exit path, unless the function opted out via attribute.  */
  if ((flag_sanitize & SANITIZE_THREAD) != 0
      && !lookup_attribute ("no_sanitize_thread", DECL_ATTRIBUTES (fndecl)))
    {
      gcall *call = gimple_build_call_internal (IFN_TSAN_FUNC_EXIT, 0);
      gimple *tf = gimple_build_try (seq, call, GIMPLE_TRY_FINALLY);
      gbind *new_bind = gimple_build_bind (NULL, tf, NULL);
      /* Replace the current function body with the body
	 wrapped in the try/finally TF.  */
      seq = NULL;
      gimple_seq_add_stmt (&seq, new_bind);
      gimple_set_body (fndecl, seq);
    }

  DECL_SAVED_TREE (fndecl) = NULL_TREE;
  cfun->curr_properties |= PROP_gimple_any;

  pop_cfun ();

  dump_function (TDI_generic, fndecl);
}
/* Return a dummy expression of type TYPE in order to keep going after an
   error.  */

static tree
dummy_object (tree type)
{
  /* A MEM_REF of TYPE through a literal null pointer: both the base and
     the offset operand reuse the same zero constant.  */
  tree null_ptr = build_int_cst (build_pointer_type (type), 0);
  return build2 (MEM_REF, type, null_ptr, null_ptr);
}
/* Gimplify __builtin_va_arg, aka VA_ARG_EXPR, which is not really a
   builtin function, but a very special sort of operator.  */

enum gimplify_status
gimplify_va_arg_expr (tree *expr_p, gimple_seq *pre_p,
		      gimple_seq *post_p ATTRIBUTE_UNUSED)
{
  tree promoted_type, have_va_type;
  tree valist = TREE_OPERAND (*expr_p, 0);
  tree type = TREE_TYPE (*expr_p);
  tree t, tag, aptag;
  location_t loc = EXPR_LOCATION (*expr_p);

  /* Verify that valist is of the proper type.  */
  have_va_type = TREE_TYPE (valist);
  if (have_va_type == error_mark_node)
    return GS_ERROR;
  have_va_type = targetm.canonical_va_list_type (have_va_type);
  if (have_va_type == NULL_TREE
      && POINTER_TYPE_P (TREE_TYPE (valist)))
    /* Handle 'Case 1: Not an array type' from c-common.c/build_va_arg.  */
    have_va_type
      = targetm.canonical_va_list_type (TREE_TYPE (TREE_TYPE (valist)));
  gcc_assert (have_va_type != NULL_TREE);

  /* Generate a diagnostic for requesting data of a type that cannot
     be passed through `...' due to type promotion at the call site.  */
  if ((promoted_type = lang_hooks.types.type_promotes_to (type))
      != type)
    {
      static bool gave_help;
      bool warned;
      /* Use the expansion point to handle cases such as passing bool (defined
	 in a system header) through `...'.  */
      source_location xloc
	= expansion_point_location_if_in_system_header (loc);

      /* Unfortunately, this is merely undefined, rather than a constraint
	 violation, so we cannot make this an error.  If this call is never
	 executed, the program is still strictly conforming.  */
      warned = warning_at (xloc, 0,
			   "%qT is promoted to %qT when passed through %<...%>",
			   type, promoted_type);
      /* The help note is only emitted once per translation (static flag).  */
      if (!gave_help && warned)
	{
	  gave_help = true;
	  inform (xloc, "(so you should pass %qT not %qT to %<va_arg%>)",
		  promoted_type, type);
	}

      /* We can, however, treat "undefined" any way we please.
	 Call abort to encourage the user to fix the program.  */
      if (warned)
	inform (xloc, "if this code is reached, the program will abort");
      /* Before the abort, allow the evaluation of the va_list
	 expression to exit or longjmp.  */
      gimplify_and_add (valist, pre_p);
      t = build_call_expr_loc (loc,
			       builtin_decl_implicit (BUILT_IN_TRAP), 0);
      gimplify_and_add (t, pre_p);

      /* This is dead code, but go ahead and finish so that the
	 mode of the result comes out right.  */
      *expr_p = dummy_object (type);
      return GS_ALL_DONE;
    }

  /* Rewrite VA_ARG_EXPR into an internal IFN_VA_ARG call.  TAG and APTAG
     are zero constants whose types encode, respectively, the requested
     element type and the va_list type for the later expansion.  */
  tag = build_int_cst (build_pointer_type (type), 0);
  aptag = build_int_cst (TREE_TYPE (valist), 0);

  *expr_p = build_call_expr_internal_loc (loc, IFN_VA_ARG, type, 3,
					  valist, tag, aptag);

  /* Clear the tentatively set PROP_gimple_lva, to indicate that IFN_VA_ARG
     needs to be expanded.  */
  cfun->curr_properties &= ~PROP_gimple_lva;

  return GS_OK;
}
/* Build a new GIMPLE_ASSIGN tuple and append it to the end of *SEQ_P.
   DST/SRC are the destination and source respectively.  You can pass
   ungimplified trees in DST or SRC, in which case they will be
   converted to a gimple operand if necessary.

   This function returns the newly created GIMPLE_ASSIGN tuple.  */

gimple *
gimplify_assign (tree dst, tree src, gimple_seq *seq_p)
{
  /* Gimplifying the MODIFY_EXPR appends the resulting GIMPLE_ASSIGN --
     preceded by whatever statements DST/SRC themselves require -- to
     *SEQ_P, so the transient tree node can be returned to the GC pool
     right away and the last statement of the sequence is the one we
     built.  */
  tree assign = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
  gimplify_and_add (assign, seq_p);
  ggc_free (assign);
  return gimple_seq_last_stmt (*seq_p);
}
/* Hash a cached expression by its structural value, consistently with
   gimplify_hasher::equal.  */

inline hashval_t
gimplify_hasher::hash (const elt_t *p)
{
  return iterative_hash_expr (p->val, 0);
}
/* Equality callback for the gimplify temporary cache: two entries match
   when their expressions are structurally identical.  */

inline bool
gimplify_hasher::equal (const elt_t *p1, const elt_t *p2)
{
  tree a = p1->val;
  tree b = p2->val;

  /* Cheap structural filters first: identical tree code and type.  */
  if (TREE_CODE (a) != TREE_CODE (b)
      || TREE_TYPE (a) != TREE_TYPE (b))
    return false;

  if (!operand_equal_p (a, b, 0))
    return false;

  /* Only allow them to compare equal if they also hash equal; otherwise
     results are nondeterminate, and we fail bootstrap comparison.  */
  gcc_checking_assert (hash (p1) == hash (p2));

  return true;
}
|
covariance.c | /******************************************************************
* Melissa *
*-----------------------------------------------------------------*
* COPYRIGHT (C) 2017 by INRIA and EDF. ALL RIGHTS RESERVED. *
* *
* This source is covered by the BSD 3-Clause License. *
* Refer to the LICENCE file for further information. *
* *
*-----------------------------------------------------------------*
* Original Contributors: *
* Theophile Terraz, *
* Bruno Raffin, *
* Alejandro Ribes, *
* Bertrand Iooss, *
******************************************************************/
/**
*
* @file covariance.c
* @brief Functions needed to compute covariances.
* @author Terraz Théophile
* @date 2016-01-07
*
**/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#ifdef BUILD_WITH_MPI
//#include <mpi.h>
#endif // BUILD_WITH_MPI
#ifdef BUILD_WITH_OPENMP
#include <omp.h>
#endif // BUILD_WITH_OPENMP
#include "mean.h"
#include "variance.h"
#include "covariance.h"
#include "melissa_utils.h"
/**
 *******************************************************************************
 *
 * @ingroup stats_base
 *
 * This function initializes a covariance structure: the covariance vector
 * is zero-allocated, both running means are initialized, and the sample
 * counter starts at zero.
 *
 *******************************************************************************
 *
 * @param[in,out] *covariance
 * the covariance structure to initialize
 *
 * @param[in] vect_size
 * size of the covariance vector
 *
 *******************************************************************************/

void init_covariance (covariance_t *covariance,
                      const int     vect_size)
{
    /* The three fields are independent, so initialization order is free. */
    covariance->increment = 0;
    init_mean (&covariance->mean1, vect_size);
    init_mean (&covariance->mean2, vect_size);
    covariance->covariance = melissa_calloc (vect_size, sizeof(double));
}
/**
*******************************************************************************
*
* @ingroup stats_base
*
* This function updates the incremental covariance.
*
*******************************************************************************
*
* @param[in,out] *covariance
* input: previously computed covariance,
* output: incremented covariance
*
* @param[in] in_vect1[]
* first input vector of double values
*
* @param[in] in_vect2[]
* second input vector of double values
*
* @param[in] vect_size
* size of the input vectors
*
*******************************************************************************/
//void increment_covariance (covariance_t *covariance,
// double in_vect1[],
// double in_vect2[],
// const int vect_size)
//{
// int i;
// int incr = 0;
// increment_mean(&(covariance->mean1), in_vect1, vect_size);
// incr = covariance->increment;
// if (covariance->increment > 0)
// {
//#pragma omp parallel for schedule(static) firstprivate(incr)
// for (i=0; i<vect_size; i++)
// {
// covariance->covariance[i] *= (incr - 1);
// covariance->covariance[i] += (in_vect1[i] - covariance->mean1.mean[i]) * (in_vect2[i] - covariance->mean2.mean[i]);
// covariance->covariance[i] /= (double)(incr);
// }
// }
// increment_mean(&(covariance->mean2), in_vect2, vect_size);
// covariance->increment += 1;
//}
void increment_covariance (covariance_t *covariance,
                           double in_vect1[],
                           double in_vect2[],
                           const int vect_size)
{
    int    i;
    double count;  /* sample count AFTER this update, as a double */

    covariance->increment += 1;
    count = (double)covariance->increment;

    /* One-pass co-moment update: mean1 is advanced BEFORE the loop while
       mean2 is still the previous mean -- this asymmetry is what makes the
       recurrence unbiased, so the ordering must not change. */
    increment_mean(&(covariance->mean1), in_vect1, vect_size);
    if (covariance->increment > 1)
    {
#pragma omp parallel for schedule(static) firstprivate(count)
        for (i=0; i<vect_size; i++)
        {
            covariance->covariance[i] *= (count - 2);
            covariance->covariance[i] += (in_vect1[i] - covariance->mean1.mean[i]) * (in_vect2[i] - covariance->mean2.mean[i]);
            covariance->covariance[i] /= (count - 1);
        }
    }
    /* mean2 is only advanced once the loop has consumed its old value. */
    increment_mean(&(covariance->mean2), in_vect2, vect_size);
}
/**
*******************************************************************************
*
* @ingroup stats_base
*
 * This function merges two partial covariances, computed on disjoint
 * sample subsets, into a single covariance over the combined samples.
*
*******************************************************************************
*
* @param[in] *covariance1
* first input partial covariance
*
* @param[in] *covariance2
* second input partial covariance
*
* @param[out] *updated_covariance
* updated covariance
*
* @param[in] vect_size
* size of the input vectors
*
*******************************************************************************/
void update_covariance (covariance_t *covariance1,
                        covariance_t *covariance2,
                        covariance_t *updated_covariance,
                        const int     vect_size)
{
    int i;
    /* Sample counts as doubles: the cross term (n1*n2)/n below must be
       evaluated in floating point.  The previous code computed it with
       int arithmetic, truncating the ratio (and risking int overflow in
       n1*n2 for large sample counts), which biased the merged result. */
    const double n1 = (double)covariance1->increment;
    const double n2 = (double)covariance2->increment;
    const double n  = n1 + n2;

    updated_covariance->increment = covariance1->increment + covariance2->increment;

#pragma omp parallel for schedule(static)
    for (i=0; i<vect_size; i++)
    {
        /* Pairwise merge of two partial covariances (Chan/Golub/LeVeque):
           recover each group's co-moment via (n-1)*cov, add the
           between-group mean correction, then renormalize by (n-1). */
        updated_covariance->covariance[i] = ((n1 - 1) * covariance1->covariance[i]
                                             + (n2 - 1) * covariance2->covariance[i]
                                             + ((n1 * n2) / n)
                                             * (covariance2->mean1.mean[i] - covariance1->mean1.mean[i])
                                             * (covariance2->mean2.mean[i] - covariance1->mean2.mean[i]))
                                            / (n - 1);
    }
    update_mean (&covariance1->mean1, &covariance2->mean1, &updated_covariance->mean1, vect_size);
    update_mean (&covariance1->mean2, &covariance2->mean2, &updated_covariance->mean2, vect_size);
}
/**
*******************************************************************************
*
* @ingroup save_stats
*
* This function writes an array of covariances structures on disc
*
*******************************************************************************
*
* @param[in] *covars
* covariance structures to save, size nb_time_steps
*
* @param[in] vect_size
* size of double vectors
*
* @param[in] nb_time_steps
* number of time_steps of the study
*
* @param[in] f
* file descriptor
*
*******************************************************************************/
void save_covariance(covariance_t *covars,
                     int           vect_size,
                     int           nb_time_steps,
                     FILE*         f)
{
    int ts;
    /* One record per time step: raw covariance vector, mean1, mean2, then
       the sample counter.  The layout must mirror read_covariance. */
    for (ts=0; ts<nb_time_steps; ts++)
    {
        covariance_t *cov = &covars[ts];

        fwrite(cov->covariance, sizeof(double), vect_size, f);
        save_mean (&cov->mean1, vect_size, 1, f);
        save_mean (&cov->mean2, vect_size, 1, f);
        fwrite(&cov->increment, sizeof(int), 1, f);
    }
}
/**
*******************************************************************************
*
* @ingroup save_stats
*
 * This function reads an array of covariance structures from disc
*
*******************************************************************************
*
 * @param[out] *covars
* covariance structures to read, size nb_time_steps
*
* @param[in] vect_size
* size of double vectors
*
* @param[in] nb_time_steps
* number of time_steps of the study
*
* @param[in] f
* file descriptor
*
*******************************************************************************/
void read_covariance(covariance_t *covars,
                     int           vect_size,
                     int           nb_time_steps,
                     FILE*         f)
{
    int i;
    for (i=0; i<nb_time_steps; i++)
    {
        /* fread returns the number of items actually read; the original
           code ignored it, so a truncated or corrupted checkpoint silently
           left garbage in the structures.  Report and stop instead. */
        if (fread(covars[i].covariance, sizeof(double), vect_size, f)
            != (size_t)vect_size)
        {
            fprintf(stderr,
                    "WARNING: incomplete covariance checkpoint (time step %d)\n",
                    i);
            return;
        }
        read_mean (&covars[i].mean1, vect_size, 1, f);
        read_mean (&covars[i].mean2, vect_size, 1, f);
        if (fread(&covars[i].increment, sizeof(int), 1, f) != 1)
        {
            fprintf(stderr,
                    "WARNING: incomplete covariance checkpoint (time step %d)\n",
                    i);
            return;
        }
    }
}
/**
*******************************************************************************
*
* @ingroup stats_base
*
* This function frees a covariance structure.
*
*******************************************************************************
*
* @param[in] *covariance
* the covariance structure to free
*
*******************************************************************************/
void free_covariance (covariance_t *covariance)
{
    /* Release the embedded means and the covariance vector; the struct
       itself is owned by the caller and is not freed here.  The three
       releases are independent. */
    free_mean (&covariance->mean2);
    free_mean (&covariance->mean1);
    melissa_free (covariance->covariance);
}
|
ast-dump-openmp-begin-declare-variant_8.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics
#pragma omp begin declare variant match(device={kind(cpu)}) // cpu-kind variant of also_before; per the CHECK lines it loses to the score(100) llvm variant below
int also_before(void) {
  return 1;
}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score(0):llvm)}) // low-score llvm variant of also_after; CHECK lines show it is still the callee picked in test()
int also_after(void) {
  return 0;
}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score(100):llvm)}) // high-score llvm variant of also_before; expected winner in test()
int also_before(void) {
  return 0;
}
#pragma omp end declare variant
int also_after(void) { // base definition (returns 2); must NOT be picked by test()
  return 2;
}
int test(void) { // NOTE: only trailing comments may be added here -- the CHECK lines below reference exact line:col positions
  // Should return 0.
  return also_after() + also_before();
}
// Make sure:
// - we do see the ast nodes for the cpu kind
// - we do see the ast nodes for the llvm vendor
// - we pick the right callees
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, col:21> col:5 implicit used also_before 'int ({{.*}})'
// CHECK-NEXT: | |-OMPDeclareVariantAttr [[ADDR_1:0x[a-z0-9]*]] <<invalid sloc>> Implicit device={kind(cpu)}
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_2:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_3:0x[a-z0-9]*]] 'also_before[device={kind(cpu)}]' 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(score(100): llvm)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:17:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_3]] <line:6:1, line:8:1> line:6:1 also_before[device={kind(cpu)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_7:0x[a-z0-9]*]] <col:23, line:8:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_8:0x[a-z0-9]*]] <line:7:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_9:0x[a-z0-9]*]] <col:10> 'int' 1
// CHECK-NEXT: |-FunctionDecl [[ADDR_10:0x[a-z0-9]*]] <line:12:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_11:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(score(0): llvm)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_12:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_13:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_13]] <col:1, line:14:1> line:12:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:22, line:14:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:13:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:17:1, line:19:1> line:17:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_17:0x[a-z0-9]*]] <col:23, line:19:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_18:0x[a-z0-9]*]] <line:18:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_19:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_20:0x[a-z0-9]*]] prev [[ADDR_10]] <line:22:1, line:24:1> line:22:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_21:0x[a-z0-9]*]] <col:22, line:24:1>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_22:0x[a-z0-9]*]] <line:23:3, col:10>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_23:0x[a-z0-9]*]] <col:10> 'int' 2
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_24:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(score(0): llvm)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_12]] <line:12:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CHECK-NEXT: `-FunctionDecl [[ADDR_25:0x[a-z0-9]*]] <line:26:1, line:29:1> line:26:5 test 'int ({{.*}})'
// CHECK-NEXT: `-CompoundStmt [[ADDR_26:0x[a-z0-9]*]] <col:16, line:29:1>
// CHECK-NEXT: `-ReturnStmt [[ADDR_27:0x[a-z0-9]*]] <line:28:3, col:37>
// CHECK-NEXT: `-BinaryOperator [[ADDR_28:0x[a-z0-9]*]] <col:10, col:37> 'int' '+'
// CHECK-NEXT: |-PseudoObjectExpr [[ADDR_29:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT: | |-CallExpr [[ADDR_30:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_31:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_32:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_20]] 'also_after' 'int ({{.*}})'
// CHECK-NEXT: | `-CallExpr [[ADDR_33:0x[a-z0-9]*]] <line:12:1, line:28:21> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <line:12:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_12]] <col:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CHECK-NEXT: `-PseudoObjectExpr [[ADDR_35:0x[a-z0-9]*]] <line:28:25, col:37> 'int'
// CHECK-NEXT: |-CallExpr [[ADDR_36:0x[a-z0-9]*]] <col:25, col:37> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_38:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// CHECK-NEXT: `-CallExpr [[ADDR_39:0x[a-z0-9]*]] <line:17:1, line:28:37> 'int'
// CHECK-NEXT: `-ImplicitCastExpr [[ADDR_40:0x[a-z0-9]*]] <line:17:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
|
valid.mob3.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_128_56_56_128_3_3.h"
#include "gen_ukr_A4B2gemm_1_128_56_56_128_3_3.h"
void testrun(float* A ,float*B, float*C, float*oriB ){
/* Push-button-generated convolution driver (micro-kernels come from the
   included gen_ukr_* headers; uN*, Tf2/Tc1/Txy3 and min() come from ukr.h
   and friends).  Phase 1 repacks the weights oriB into the blocked buffer
   B; phase 2 runs the generated 15-level tiled loop nest and dispatches an
   AVX scatter micro-kernel per innermost tile.  Expected to run inside an
   OpenMP parallel region (uses omp_get_thread_num and a barrier).
   NOTE(review): generated code -- regenerate rather than edit by hand.  */
int tid = omp_get_thread_num();
/* NOTE(review): Nx/Ny/Nh are never read below; the generator inlined the
   56/56/3 problem-size constants directly into the loop bounds.  */
int Nx = 56;
int Ny = 56;
int Nh = 3;
/* Per-row offsets handed to the scatter micro-kernels; temporarily bumped
   (and restored) for tiles that straddle an image-row boundary.  */
long long Astrides[6] = {0,1,2,3,4,5};
int b1 = 0;
/* Phase 1: repack oriB into B, 16 filters per panel, one 8x8 AVX transpose
   per half-panel.  (tid%1)/(tid/1) suggest this was generated for a single
   thread -- TODO confirm against the generator settings.  */
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
/* All packing must be complete before any thread starts consuming B.  */
#pragma omp barrier// begin push button generated block
for(int c5=0;c5<128+0;c5+=128)
{
for(int xy5=0;xy5<3136+0;xy5+=3136)
{
for(int f5=0;f5<128+0;f5+=128)
{
for(int c4=c5;c4<min(128, 128+c5);c4+=128)
{
for(int f4=f5;f4<min(128, 128+f5);f4+=Tf2)
{
for(int xy4=xy5;xy4<min(3136, 3136+xy5);xy4+=3136)
{
for(int c3=c4;c3<min(128, 128+c4);c3+=Tc1)
{
for(int f3=f4;f3<min(128, Tf2+f4);f3+=Tf2)
{
for(int xy3=xy4;xy3<min(3136, 3136+xy4);xy3+=Txy3)
{
for(int xy2=xy3;xy2<min(3136, Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(128, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(128, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(128, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(3136, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(128, 16+f2);f1+=16)
{
/* Innermost tile: decode the fused xy index into row/column, split the
   filter index into panel/lane, then compute flat offsets into the packed
   A, B and C buffers.  */
int ctile=min(Tc1, 128-c1);
int x1=xy1/56;
int y1=xy1%56/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*430592+c1_1*3364+1*x1*58+1*y1*1+c1_2*1;
int offsetB=0+kf1_1*18432+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*401408+of1_1*3136+x1*56+y1*1+of1_2*1;
/* Full 6-row micro-tile entirely inside one image row.  */
if(56-y1>=6){
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
/* Micro-tile straddles a row boundary: bump the affected row offsets by 2
   (presumably to skip over row padding -- TODO confirm) for the call, then
   restore them.  */
else if(56*56-xy1>=6){
for(int sti=56-y1;sti<6;sti+=1)
{
Astrides[sti]+=2;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=56-y1;sti<6;sti+=1)
{
Astrides[sti]-=2;
}
}
/* Remainder at the very end of the image: narrower 4-row kernel.  */
else{
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
}
dicts.h |
/* Software SPAMS v2.1 - Copyright 2009-2011 Julien Mairal
*
* This file is part of SPAMS.
*
* SPAMS is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SPAMS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SPAMS. If not, see <http://www.gnu.org/licenses/>.
*/
/*!
*
* \file
* toolbox dictLearn
*
* by Julien Mairal
* julien.mairal@inria.fr
*
* File dicts.h
* \brief Contains dictionary learning algorithms
* It requires the toolbox decomp */
#ifndef DICTS_H
#define DICTS_H
#include <decomp.h>
/* Scratch character buffer (its uses are not visible in this header).
   NOTE(review): this is a non-static DEFINITION in a header -- every
   translation unit that includes dicts.h defines its own buffer_string,
   which causes multiple-definition link errors / ODR violations if the
   header is included from more than one .cpp; consider making it static
   or moving the definition out of the header.  */
char buffer_string[50];
/* Constraint applied to the dictionary during updates (ParamDictLearn::modeD,
   Trainer::cleanDict); exact semantics live in the solver code.  */
enum constraint_type_D { L2, L1L2, L1L2FL, L1L2MU};
/* Strategy for choosing internal algorithm parameters
   (ParamDictLearn::modeParam): AUTO lets the library pick, PARAM1..PARAM3
   select preset schemes -- presumably; confirm in the training code.  */
enum mode_compute { AUTO, PARAM1, PARAM2, PARAM3};
/// Bag of options for the dictionary-learning solvers.  The constructor
/// installs defaults for every member except `iter` and `lambda`, which the
/// caller MUST set before use.  Member-level comments below are inferred
/// from member names and the visible accessors -- the authoritative
/// semantics live in the solver implementations (train/trainOffline).
template <typename T> struct ParamDictLearn {
   public:
      /// Install the documented defaults; iter and lambda are deliberately
      /// left uninitialized.
      ParamDictLearn() :
         mode(PENALTY),
         posAlpha(false),
         modeD(L2),
         posD(false),
         modeParam(AUTO),
         t0(1e-5),
         rho(5),
         gamma1(0),
         mu(0),
         lambda3(0),
         lambda4(0),
         lambda2(0),
         gamma2(0),
         approx(0.0),
         p(1.0),
         whiten(false),
         expand(false),
         isConstant(false),
         updateConstant(true),
         ThetaDiag(false),
         ThetaDiagPlus(false),
         ThetaId(false),
         DequalsW(false),
         weightClasses(false),
         balanceClasses(false),
         extend(false),
         pattern(false),
         stochastic(false),
         scaleW(false),
         batch(false),
         verbose(true),
         clean(true),
         log(false),
         updateD(true),
         updateW(true),
         updateTheta(true),
         logName(NULL),
         iter_updateD(1) { };

      /// Releases the owned log-file name.
      /// NOTE(review): the destructor delete[]s logName but the struct keeps
      /// the compiler-generated copy constructor/assignment, so copying an
      /// instance with a non-NULL logName leads to a double delete (Rule of
      /// Three violation) -- confirm no caller copies ParamDictLearn by value.
      ~ParamDictLearn() { delete[](logName); };

      int iter;                    // iteration count/budget -- no default, set by caller
      T lambda;                    // main regularization parameter -- no default, set by caller
      constraint_type mode;        // sparse-coding formulation (default PENALTY)
      bool posAlpha;               // presumably: force non-negative codes
      constraint_type_D modeD;     // constraint on the dictionary (see constraint_type_D)
      bool posD;                   // presumably: force non-negative dictionary entries
      mode_compute modeParam;      // how internal parameters are chosen (see mode_compute)
      T t0;                        // learning-rate schedule constant (default 1e-5)
      T rho;                       // learning-rate schedule constant (default 5)
      T gamma1;                    // additional regularization constants; their exact
      T mu;                        //   role depends on modeD / the chosen solver
      T lambda3;
      T lambda4;
      T lambda2;
      T gamma2;
      T approx;
      T p;
      bool whiten;                 // whiten the input data before training
      bool expand;
      bool isConstant;
      bool updateConstant;
      bool ThetaDiag;
      bool ThetaDiagPlus;
      bool ThetaId;
      bool DequalsW;
      bool weightClasses;
      bool balanceClasses;
      bool extend;
      bool pattern;
      bool stochastic;
      bool scaleW;
      bool batch;                  // batch (as opposed to online) processing
      bool verbose;                // print progress information (default true)
      bool clean;                  // clean the dictionary during training (default true)
      bool log;                    // write logs to logName when true
      bool updateD;                // toggle the dictionary-update step
      bool updateW;
      bool updateTheta;
      char* logName;               // log-file name; OWNED (delete[]d in the destructor)
      int iter_updateD;            // dictionary-update sub-iterations per step (default 1)
};
/// Online dictionary-learning trainer.  Holds the current dictionary _D and
/// the accumulated statistics _A / _B that the training iterations update;
/// state can be exported (getA/getB/getD/getIter) and re-injected through the
/// "existing structure" constructor to resume training.
template <typename T> class Trainer {
   public:
      /// Empty constructor
      Trainer();
      /// Constructor with data: K is the number of atoms; no initial
      /// dictionary is provided.  NUM_THREADS == -1 means auto-detect.
      Trainer(const int k, const int batchsize = 256,
            const int NUM_THREADS=-1);
      /// Constructor with initial dictionary D (number of atoms = D.n()).
      Trainer(const Matrix<T>& D, const int batchsize = 256,
            const int NUM_THREADS=-1);
      /// Constructor with existing structure: resume from previously
      /// accumulated statistics A, B, dictionary D and iteration count.
      Trainer(const Matrix<T>& A, const Matrix<T>& B, const Matrix<T>& D,
            const int itercount, const int batchsize,
            const int NUM_THREADS);
      /// train or retrain using the matrix X
      void train(const Data<T>& X, const ParamDictLearn<T>& param);
      void trainOffline(const Data<T>& X, const ParamDictLearn<T>& param);
      /// train or retrain using the groups XT
      void train(const Data<T>& X, const vector_groups& groups,
            const int J, const constraint_type
            mode, const bool whiten = false, const T* param_C = NULL,
            const int p = 1, const bool pattern = false);
      /// Accessors: each deep-copies the internal matrix into the
      /// caller-supplied destination.
      void getA(Matrix<T>& A) const { A.copy(_A);};
      void getB(Matrix<T>& B) const { B.copy(_B);};
      void getD(Matrix<T>& D) const { D.copy(_D);};
      int getIter() const { return _itercount; };

   private:
      /// Forbid lazy copies (declared, never defined -- pre-C++11 idiom
      /// equivalent to "= delete")
      explicit Trainer<T>(const Trainer<T>& trainer);
      /// Forbid lazy copies
      Trainer<T>& operator=(const Trainer<T>& trainer);
      /// clean the dictionary
      void cleanDict(const Data<T>& X, Matrix<T>& G,
            const bool posD = false,
            const constraint_type_D modeD = L2, const T gamma1 = 0,
            const T gamma2 = 0,
            const T maxCorrel =
            0.999999);
      /// clean the dictionary
      void cleanDict(Matrix<T>& G);
      Matrix<T> _A;         // accumulated statistic, k x k (see dictionary ctor)
      Matrix<T> _B;         // accumulated statistic, m x k
      Matrix<T> _D;         // current dictionary; _k == _D.n() columns
      int _k;               // number of dictionary atoms
      bool _initialDict;    // true when _D holds a usable initial dictionary
      int _itercount;       // iterations performed so far (supports resuming)
      int _batchsize;       // mini-batch size
      int _NUM_THREADS;     // worker-thread count (-1 = auto-detect in ctors)
};
/// Default constructor: no dictionary, counters reset. The thread count is
/// auto-detected (capped by MAX_THREADS under OpenMP) and the default batch
/// size of 256 is scaled up with the number of available threads.
template <typename T> Trainer<T>::Trainer() : _k(0), _initialDict(false),
   _itercount(0), _batchsize(256) {
   int threads = 1;
#ifdef _OPENMP
   threads = MIN(MAX_THREADS,omp_get_num_procs());
#endif
   _NUM_THREADS = threads;
   _batchsize = floor(_batchsize*(_NUM_THREADS+1)/2);
}
/// Constructor with the number of atoms k. No dictionary is stored yet;
/// train() will draw k random data columns. NUM_THREADS == -1 requests
/// auto-detection of the thread count.
template <typename T>
Trainer<T>::Trainer(const int k, const int batchsize, const int NUM_THREADS)
   : _k(k), _initialDict(false), _itercount(0), _batchsize(batchsize),
     _NUM_THREADS(NUM_THREADS) {
   if (_NUM_THREADS == -1) {
#ifdef _OPENMP
      _NUM_THREADS = MIN(MAX_THREADS,omp_get_num_procs());
#else
      _NUM_THREADS = 1;
#endif
   }
}
/// Constructor seeded with an initial dictionary D. The statistics _A (K x K)
/// and _B (n x K) are allocated to match D; NUM_THREADS == -1 requests
/// auto-detection of the thread count.
template <typename T>
Trainer<T>::Trainer(const Matrix<T>& D, const int batchsize,
      const int NUM_THREADS)
   : _k(D.n()), _initialDict(true), _itercount(0), _batchsize(batchsize),
     _NUM_THREADS(NUM_THREADS) {
   _D.copy(D);
   _A.resize(D.n(),D.n());
   _B.resize(D.m(),D.n());
   if (_NUM_THREADS == -1) {
#ifdef _OPENMP
      _NUM_THREADS = MIN(MAX_THREADS,omp_get_num_procs());
#else
      _NUM_THREADS = 1;
#endif
   }
}
/// Resuming constructor: restores the statistics A, B, the dictionary D and
/// the iteration counter from a previous run so training can continue.
/// NUM_THREADS == -1 requests auto-detection of the thread count.
template <typename T>
Trainer<T>::Trainer(const Matrix<T>& A, const Matrix<T>& B,
      const Matrix<T>& D, const int itercount, const int batchsize,
      const int NUM_THREADS)
   : _k(D.n()), _initialDict(true), _itercount(itercount),
     _batchsize(batchsize), _NUM_THREADS(NUM_THREADS) {
   _A.copy(A);
   _B.copy(B);
   _D.copy(D);
   if (_NUM_THREADS == -1) {
#ifdef _OPENMP
      _NUM_THREADS = MIN(MAX_THREADS,omp_get_num_procs());
#else
      _NUM_THREADS = 1;
#endif
   }
}
/// Replaces degenerate dictionary atoms by random data columns.
/// An atom j is replaced when it is too correlated with an earlier atom i
/// (|G_ij|/sqrt(G_ii G_jj) > maxCorrel) or when it is numerically dead
/// (|G_jj| < 1e-4). The Gram matrix G = D'D is patched in place so callers
/// do not need to recompute it.
/// \param X     data source used to draw replacement columns
/// \param G     K x K Gram matrix of _D, updated in place
/// \param posD  enforce non-negative replacement atoms
/// \param modeD dictionary constraint set; non-L2 atoms are re-projected
/// \param gamma1,gamma2 sparsity regularizers forwarded to sparseProject
/// \param maxCorrel correlation threshold above which an atom is replaced
template <typename T>
void Trainer<T>::cleanDict(const Data<T>& X, Matrix<T>& G,
      const bool posD,
      const constraint_type_D modeD, const T gamma1,
      const T gamma2,
      const T maxCorrel) {
   // NOTE(review): train() maps L1L2MU to code 7 but this mapping has no
   // L1L2MU case (falls through to 6) -- confirm whether that is intended.
   int sparseD = modeD == L1L2 ? 2 : 6;
   const int k = _D.n();
   const int n = _D.m();
   const int M = X.n();
   T* const pr_G=G.rawX();       // raw column-major access to G (k x k)
   Vector<T> aleat(n);
   Vector<T> col(n);
   for (int i = 0; i<k; ++i) {
      //pr_G[i*k+i] += 1e-10;
      for (int j = i; j<k; ++j) {
         if ((j > i && abs(pr_G[i*k+j])/sqrt(pr_G[i*k+i]*pr_G[j*k+j]) > maxCorrel) ||
               (j == i && abs(pr_G[i*k+j]) < 1e-4)) {
            /// remove element j and replace it by a random element of X
            const int ind = random() % M;
            Vector<T> d, g;
            _D.refCol(j,d);
            X.getData(col,ind);
            d.copy(col);
            // Re-project the fresh atom onto the dictionary constraint set.
            if (modeD != L2) {
               aleat.copy(d);
               aleat.sparseProject(d,T(1.0),sparseD,gamma1,gamma2,T(2.0),posD);
            } else {
               if (posD) d.thrsPos();
               d.normalize();
            }
            // Recompute column j of G (G_j = D' d) and mirror it into row j
            // so G stays symmetric.
            G.refCol(j,g);
            _D.multTrans(d,g);
            for (int l = 0; l<_D.n(); ++l)
               pr_G[l*k+j] = pr_G[j*k+l];
         }
      }
   }
}
/// Regularizes the Gram matrix G = D'D by adding a tiny ridge (1e-10) to its
/// diagonal so downstream solvers never face an exactly singular system.
/// \param G  K x K Gram matrix of the current dictionary, modified in place
template <typename T>
void Trainer<T>::cleanDict(Matrix<T>& G) {
   const int k = _D.n();
   T* const pr_G=G.rawX();
   // (the unused local n = _D.m() from the original was removed)
   for (int i = 0; i<k; ++i) {
      pr_G[i*k+i] += 1e-10;
   }
}
/// Main dictionary-learning loop (online / batch / stochastic, selected by
/// param.batch and param.stochastic). Each iteration sparse-codes a random
/// mini-batch against the current dictionary _D, accumulates the sufficient
/// statistics A = sum(alpha alpha') and B = sum(x alpha'), and then performs
/// a block-coordinate (or gradient) update of the atoms. State (_A, _B,
/// _itercount) persists across calls so training can be resumed.
/// \param X      data provider; columns are signals of dimension n = X.m()
/// \param param  all algorithm options (see ParamDictLearn)
template <typename T>
void Trainer<T>::train(const Data<T>& X, const ParamDictLearn<T>& param) {
   T rho = param.rho;
   T t0 = param.t0;
   // Projection code passed to sparseProject for the dictionary constraint.
   // NOTE(review): cleanDict() uses "L1L2 ? 2 : 6" without the L1L2MU->7
   // case -- confirm the two mappings are meant to differ.
   int sparseD = param.modeD == L1L2 ? 2 : param.modeD == L1L2MU ? 7 : 6;
   // init_omp configures OpenMP; the per-thread buffers below are sized
   // with _NUM_THREADS, so the returned value is effectively unused here.
   int NUM_THREADS=init_omp(_NUM_THREADS);
   if (param.verbose) {
      cout << "num param iterD: " << param.iter_updateD << endl;
      if (param.batch) {
         cout << "Batch Mode" << endl;
      } else if (param.stochastic) {
         cout << "Stochastic Gradient. rho : " << rho << ", t0 : " << t0 << endl;
      } else {
         if (param.modeParam == AUTO) {
            cout << "Online Dictionary Learning with no parameter " << endl;
         } else if (param.modeParam == PARAM1) {
            cout << "Online Dictionary Learning with parameters: " << t0 << " rho: " << rho << endl;
         } else {
            cout << "Online Dictionary Learning with exponential decay t0: " << t0 << " rho: " << rho << endl;
         }
      }
      if (param.posD)
         cout << "Positivity constraints on D activated" << endl;
      if (param.posAlpha)
         cout << "Positivity constraints on alpha activated" << endl;
      if (param.modeD != L2) cout << "Sparse dictionaries, mode: " << param.modeD << ", gamma1: " << param.gamma1 << ", gamma2: " << param.gamma2 << endl;
      cout << "mode Alpha " << param.mode << endl;
      if (param.clean) cout << "Cleaning activated " << endl;
      if (param.log && param.logName) {
         cout << "log activated " << endl;
         cerr << param.logName << endl;
      }
      if (param.mode == PENALTY && param.lambda==0 && param.lambda2 > 0 && !param.posAlpha)
         cout << "L2 solver is used" << endl;
      if (_itercount > 0)
         cout << "Retraining from iteration " << _itercount << endl;
      flush(cout);
   }
   const int M = X.n();   // number of training signals
   const int K = _k;      // number of atoms
   const int n = X.m();   // signal dimension
   // L bounds the number of selected atoms per signal; in SPARSITY mode
   // lambda itself is the sparsity level.
   const int L = param.mode == SPARSITY ? static_cast<int>(param.lambda) :
      param.mode == PENALTY && param.lambda == 0 && param.lambda2 > 0 && !param.posAlpha ? K : MIN(n,K);
   const int batchsize= param.batch ? M : MIN(_batchsize,M);
   if (param.verbose) {
      cout << "batch size: " << batchsize << endl;
      cout << "L: " << L << endl;
      cout << "lambda: " << param.lambda << endl;
      cout << "mode: " << param.mode << endl;
      flush(cout);
   }
   // A dictionary of the wrong shape cannot be reused.
   if (_D.m() != n || _D.n() != K)
      _initialDict=false;
   srandom(0);   // fixed seed: runs are reproducible by design
   Vector<T> col(n);
   // Initialize the dictionary with K random data columns if needed.
   if (!_initialDict) {
      _D.resize(n,K);
      for (int i = 0; i<K; ++i) {
         const int ind = random() % M;
         Vector<T> d;
         _D.refCol(i,d);
         X.getData(col,ind);
         d.copy(col);
      }
      _initialDict=true;
   }
   if (param.verbose) {
      cout << "*****Online Dictionary Learning*****" << endl;
      flush(cout);
   }
   // Project every atom onto the dictionary constraint set.
   Vector<T> tmp(n);
   if (param.modeD != L2) {
      for (int i = 0; i<K; ++i) {
         Vector<T> d;
         _D.refCol(i,d);
         tmp.copy(d);
         tmp.sparseProject(d,T(1.0),sparseD,param.gamma1,
               param.gamma2,T(2.0),param.posD);
      }
   } else {
      if (param.posD) _D.thrsPos();
      _D.normalize();
   }
   int count=0;
   int countPrev=0;
   T scalt0 = abs<T>(t0);
   // Fresh run: zero the statistics; in online mode, warm-start them with
   // t0 * I and t0 * D so early updates are damped.
   if (_itercount == 0) {
      _A.resize(K,K);
      _A.setZeros();
      _B.resize(n,K);
      _B.setZeros();
      if (!param.batch) {
         _A.setDiag(scalt0);
         _B.copy(_D);
         _B.scal(scalt0);
      }
   }
   //Matrix<T> G(K,K);
   // Double-buffered statistics for the two-epoch forgetting scheme used in
   // the online branch (odd/even epochs plus the resumed "orig" state).
   Matrix<T> Borig(n,K);
   Matrix<T> Aorig(K,K);
   Matrix<T> Bodd(n,K);
   Matrix<T> Aodd(K,K);
   Matrix<T> Beven(n,K);
   Matrix<T> Aeven(K,K);
   // Per-thread scratch buffers so the OpenMP coding loop is lock-free.
   SpVector<T>* spcoeffT=new SpVector<T>[_NUM_THREADS];
   Vector<T>* DtRT=new Vector<T>[_NUM_THREADS];
   Vector<T>* XT=new Vector<T>[_NUM_THREADS];
   Matrix<T>* BT=new Matrix<T>[_NUM_THREADS];
   Matrix<T>* AT=new Matrix<T>[_NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[_NUM_THREADS];
   Matrix<T>* GaT=new Matrix<T>[_NUM_THREADS];
   Matrix<T>* invGsT=new Matrix<T>[_NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[_NUM_THREADS];
   Vector<T>* uT=new Vector<T>[_NUM_THREADS];
   for (int i = 0; i<_NUM_THREADS; ++i) {
      spcoeffT[i].resize(K);
      DtRT[i].resize(K);
      XT[i].resize(n);
      BT[i].resize(n,K);
      BT[i].setZeros();
      AT[i].resize(K,K);
      AT[i].setZeros();
      GsT[i].resize(L,L);
      GsT[i].setZeros();
      invGsT[i].resize(L,L);
      invGsT[i].setZeros();
      GaT[i].resize(K,L);
      GaT[i].setZeros();
      workT[i].resize(K,3);
      workT[i].setZeros();
      uT[i].resize(L);
      uT[i].setZeros();
   }
   Timer time, time2;
   time.start();
   srandom(0);
   Vector<int> perm;
   perm.randperm(M);   // random visiting order of the data columns
   Aodd.setZeros();
   Bodd.setZeros();
   Aeven.setZeros();
   Beven.setZeros();
   Aorig.copy(_A);
   Borig.copy(_B);
   // Negative param.iter means "run for -iter seconds" (checked below).
   int JJ = param.iter < 0 ? 100000000 : param.iter;
   bool even=true;
   int last_written=-40;
   int i;
   for (i = 0; i<JJ; ++i) {
      if (param.verbose) {
         cout << "Iteration: " << i << endl;
         flush(cout);
      }
      time.stop();
      if (param.iter < 0 &&
            time.getElapsed() > T(-param.iter)) break;
      // Dictionary snapshots on a logarithmic time grid.
      if (param.log) {
         int seconds=static_cast<int>(floor(log(time.getElapsed())*5));
         if (seconds > last_written) {
            last_written++;
            // NOTE(review): buffer_string is a global scratch buffer defined
            // elsewhere in this file -- not thread-safe, confirm single use.
            sprintf(buffer_string,"%s_%d.log",param.logName,
                  last_written+40);
            writeLog(_D,T(time.getElapsed()),i,buffer_string);
            fprintf(stderr,"\r%d",i);
         }
      }
      time.start();
      // Fresh Gram matrix of the (possibly cleaned) dictionary, with a
      // ridge so the coding solvers stay well-conditioned.
      Matrix<T> G;
      _D.XtX(G);
      if (param.clean)
         this->cleanDict(X,G,param.posD,
               param.modeD,param.gamma1,param.gamma2);
      G.addDiag(MAX(param.lambda2,1e-10));
      int j;
      for (j = 0; j<_NUM_THREADS; ++j) {
         AT[j].setZeros();
         BT[j].setZeros();
      }
      // ---- Sparse coding of the mini-batch (parallel, per-thread stats) --
      #pragma omp parallel for private(j)
      for (j = 0; j<batchsize; ++j) {
#ifdef _OPENMP
         int numT=omp_get_thread_num();
#else
         int numT=0;
#endif
         const int index=perm[(j+i*batchsize) % M];
         Vector<T>& Xj = XT[numT];
         SpVector<T>& spcoeffj = spcoeffT[numT];
         Vector<T>& DtRj = DtRT[numT];
         //X.refCol(index,Xj);
         X.getData(Xj,index);
         if (param.whiten) {
            if (param.pattern) {
               Vector<T> mean(4);
               Xj.whiten(mean,param.pattern);
            } else {
               Xj.whiten(X.V());
            }
         }
         _D.multTrans(Xj,DtRj);   // correlations D'x drive all the solvers
         Matrix<T>& Gs = GsT[numT];
         Matrix<T>& Ga = GaT[numT];
         Matrix<T>& invGs = invGsT[numT];
         Matrix<T>& work= workT[numT];
         Vector<T>& u = uT[numT];
         Vector<int> ind;
         Vector<T> coeffs_sparse;
         spcoeffj.setL(L);
         spcoeffj.refIndices(ind);
         spcoeffj.refVal(coeffs_sparse);
         T normX=Xj.nrm2sq();
         coeffs_sparse.setZeros();
         if (param.mode < SPARSITY) {
            // Pure ridge case: dense solve by conjugate gradient.
            if (param.mode == PENALTY && param.lambda==0 && param.lambda2 > 0 && !param.posAlpha) {
               Matrix<T>& GG = G;
               u.set(0);
               GG.conjugateGradient(DtRj,u,1e-4,2*K);
               for (int k = 0; k<K; ++k) {
                  ind[k]=k;
                  coeffs_sparse[k]=u[k];
               }
            } else {
               coreLARS2(DtRj,G,Gs,Ga,invGs,u,coeffs_sparse,ind,work,normX,param.mode,param.lambda,param.posAlpha);
            }
         } else {
            // Greedy OMP variants for sparsity / error-constrained modes.
            if (param.mode == SPARSITY) {
               coreORMPB(DtRj,G,ind,coeffs_sparse,normX,L,T(0.0),T(0.0));
            } else if (param.mode==L2ERROR2) {
               coreORMPB(DtRj,G,ind,coeffs_sparse,normX,L,param.lambda,T(0.0));
            } else {
               coreORMPB(DtRj,G,ind,coeffs_sparse,normX,L,T(0.0),param.lambda);
            }
         }
         // Count the selected atoms (index -1 terminates the list).
         int count2=0;
         for (int k = 0; k<L; ++k)
            if (ind[k] == -1) {
               break;
            } else {
               ++count2;
            }
         sort(ind.rawX(),coeffs_sparse.rawX(),0,count2-1);
         spcoeffj.setL(count2);
         // Accumulate this thread's share of A += alpha alpha', B += x alpha'.
         AT[numT].rank1Update(spcoeffj);
         BT[numT].rank1Update(Xj,spcoeffj);
      }
      if (param.batch) {
         // ---- Batch mode: statistics are recomputed from scratch ----
         _A.setZeros();
         _B.setZeros();
         for (j = 0; j<_NUM_THREADS; ++j) {
            _A.add(AT[j]);
            _B.add(BT[j]);
         }
         // Block-coordinate descent on the atoms: d_k <- proj(d_k +
         // (b_k - D a_k)/A_kk); atoms with negligible usage are zeroed.
         Vector<T> di, ai,bi;
         Vector<T> newd(n);
         for (j = 0; j<param.iter_updateD; ++j) {
            for (int k = 0; k<K; ++k) {
               if (_A[k*K+k] > 1e-6) {
                  _D.refCol(k,di);
                  _A.refCol(k,ai);
                  _B.refCol(k,bi);
                  _D.mult(ai,newd,T(-1.0));
                  newd.add(bi);
                  newd.scal(T(1.0)/_A[k*K+k]);
                  newd.add(di);
                  if (param.modeD != L2) {
                     newd.sparseProject(di,T(1.0),
                           sparseD,param.gamma1,
                           param.gamma2,T(2.0),param.posD);
                  } else {
                     if (param.posD) newd.thrsPos();
                     newd.normalize2();
                     di.copy(newd);
                  }
               } else if (param.clean) {
                  _D.refCol(k,di);
                  di.setZeros();
               }
            }
         }
      } else if (param.stochastic) {
         // ---- Stochastic gradient mode: D += step * (B - D A) ----
         _A.setZeros();
         _B.setZeros();
         for (j = 0; j<_NUM_THREADS; ++j) {
            _A.add(AT[j]);
            _B.add(BT[j]);
         }
         _D.mult(_A,_B,false,false,T(-1.0),T(1.0));
         T step_grad=rho/T(t0+batchsize*(i+1));   // decaying step size
         _D.add(_B,step_grad);
         Vector<T> dj;
         Vector<T> dnew(n);
         if (param.modeD != L2) {
            for (j = 0; j<K; ++j) {
               _D.refCol(j,dj);
               dnew.copy(dj);
               dnew.sparseProject(dj,T(1.0),sparseD,param.gamma1,
                     param.gamma2,T(2.0),param.posD);
            }
         } else {
            for (j = 0; j<K; ++j) {
               _D.refCol(j,dj);
               if (param.posD) dj.thrsPos();
               dj.normalize2();
            }
         }
      } else {
         /// Dictionary Update
         /// Check the epoch parity
         // ---- Online mode: forgetting-factor statistics split over two
         // alternating epochs (odd/even buffers) to limit the memory of
         // old, stale codes.
         int epoch = (((i+1) % M)*batchsize) / M;
         if ((even && ((epoch % 2) == 1)) || (!even && ((epoch % 2) == 0))) {
            Aodd.copy(Aeven);
            Bodd.copy(Beven);
            Aeven.setZeros();
            Beven.setZeros();
            count=countPrev;
            countPrev=0;
            even=!even;
         }
         int ii=_itercount+i;
         int num_elem=MIN(2*M, ii < batchsize ? ii*batchsize :
               batchsize*batchsize+ii-batchsize);
         T scal2=T(T(1.0)/batchsize);
         T scal;
         int totaliter=_itercount+count;
         // Forgetting factor per the selected schedule.
         if (param.modeParam == PARAM2) {
            scal=param.rho;
         } else if (param.modeParam == PARAM1) {
            scal=MAX(0.95,pow(T(totaliter)/T(totaliter+1),-rho));
         } else {
            scal = T(_itercount+num_elem+1-
                  batchsize)/T(_itercount+num_elem+1);
         }
         Aeven.scal(scal);
         Beven.scal(scal);
         Aodd.scal(scal);
         Bodd.scal(scal);
         // Early in a (re)run, keep decaying the inherited statistics;
         // afterwards they are fully replaced by the odd/even buffers.
         if ((_itercount > 0 && i*batchsize < M)
               || (_itercount == 0 && t0 != 0 &&
                  i*batchsize < 10000)) {
            Aorig.scal(scal);
            Borig.scal(scal);
            _A.copy(Aorig);
            _B.copy(Borig);
         } else {
            _A.setZeros();
            _B.setZeros();
         }
         for (j = 0; j<_NUM_THREADS; ++j) {
            Aeven.add(AT[j],scal2);
            Beven.add(BT[j],scal2);
         }
         _A.add(Aodd);
         _A.add(Aeven);
         _B.add(Bodd);
         _B.add(Beven);
         ++count;
         ++countPrev;
         // Same block-coordinate atom update as the batch branch; dead atoms
         // are only cleaned once enough samples have been seen.
         Vector<T> di, ai,bi;
         Vector<T> newd(n);
         for (j = 0; j<param.iter_updateD; ++j) {
            for (int k = 0; k<K; ++k) {
               if (_A[k*K+k] > 1e-6) {
                  _D.refCol(k,di);
                  _A.refCol(k,ai);
                  _B.refCol(k,bi);
                  _D.mult(ai,newd,T(-1.0));
                  newd.add(bi);
                  newd.scal(T(1.0)/_A[k*K+k]);
                  newd.add(di);
                  if (param.modeD != L2) {
                     newd.sparseProject(di,T(1.0),sparseD,
                           param.gamma1,param.gamma2,T(2.0),param.posD);
                  } else {
                     if (param.posD) newd.thrsPos();
                     newd.normalize2();
                     di.copy(newd);
                  }
               } else if (param.clean &&
                     ((_itercount+i)*batchsize) > 10000) {
                  _D.refCol(k,di);
                  di.setZeros();
               }
            }
         }
      }
   }
   _itercount += i;   // remember progress so a later call resumes here
   if (param.verbose)
      time.printElapsed();
   // Release the per-thread scratch buffers.
   delete[](spcoeffT);
   delete[](DtRT);
   delete[](AT);
   delete[](BT);
   delete[](GsT);
   delete[](invGsT);
   delete[](GaT);
   delete[](uT);
   delete[](XT);
   delete[](workT);
};
/// Dumps the dictionary D to a text file for progress logging.
/// First line: "<time> <iter>"; then one line per atom (column of D),
/// 12-digit scientific notation, accessed column-major via D[i*D.m()+j].
/// \param D     dictionary matrix (D.m() rows x D.n() columns)
/// \param time  elapsed-time stamp written on the first line
/// \param iter  iteration counter written on the first line
/// \param name  output file path; the file is truncated if it exists
template <typename T>
void writeLog(const Matrix<T>& D, const T time, int iter,
      char* name) {
   if (!name) return;   // nothing to write to
   std::ofstream f;
   f.precision(12);
   f.flags(std::ios_base::scientific);
   f.open(name, ofstream::trunc);
   if (!f) return;      // logging is best-effort: fail quietly if open fails
   f << time << " " << iter << std::endl;
   for (int i = 0; i<D.n(); ++i) {
      for (int j = 0; j<D.m(); ++j) {
         f << D[i*D.m()+j] << " ";
      }
      f << std::endl;
   }
   f << std::endl;
   f.close();
}
/// Offline dictionary learning: unlike train(), the coefficient vector of
/// every data column is stored in `coeffs` and refined incrementally with
/// (constrained) iterative soft-thresholding, while the statistics A and B
/// are updated by removing each sample's old rank-1 contribution and adding
/// the new one (weighted by how often the sample was visited).
/// \param X      data provider; columns are signals of dimension n = X.m()
/// \param param  algorithm options (see ParamDictLearn)
template <typename T>
void Trainer<T>::trainOffline(const Data<T>& X,
      const ParamDictLearn<T>& param) {
   // NOTE(review): train() maps L1L2MU to 7; this mapping does not --
   // confirm whether trainOffline intentionally ignores L1L2MU.
   int sparseD = param.modeD == L1L2 ? 2 : 6;
   int J = param.iter;           // negative J means "run for -J seconds"
   int batch_size= _batchsize;
   int batchsize= _batchsize;
   int NUM_THREADS=init_omp(_NUM_THREADS);
   const int n = X.m();
   const int K = _k;
   const int M = X.n();
   cout << "*****Offline Dictionary Learning*****" << endl;
   fprintf(stderr,"num param iterD: %d\n",param.iter_updateD);
   cout << "batch size: " << _batchsize << endl;
   cout << "lambda: " << param.lambda << endl;
   cout << "X: " << n << " x " << M << endl;
   cout << "D: " << n << " x " << K << endl;
   flush(cout);
   srandom(0);   // fixed seed: reproducible runs
   Vector<T> col(n);
   // Initialize the dictionary with K random data columns if needed.
   if (!_initialDict) {
      _D.resize(n,K);
      for (int i = 0; i<K; ++i) {
         const int ind = random() % M;
         Vector<T> d;
         _D.refCol(i,d);
         X.getData(col,ind);
         d.copy(col);
      }
      _initialDict=true;
   }
   // Project every atom onto the dictionary constraint set.
   Vector<T> tmp(n);
   if (param.modeD != L2) {
      for (int i = 0; i<K; ++i) {
         Vector<T> d;
         _D.refCol(i,d);
         tmp.copy(d);
         tmp.sparseProject(d,T(1.0),sparseD,param.gamma1,
               param.gamma2,T(2.0),param.posD);
      }
   } else {
      if (param.posD) _D.thrsPos();
      _D.normalize();
   }
   Matrix<T> G(K,K);          // Gram matrix D'D
   Matrix<T> coeffs(K,M);     // one stored coefficient vector per sample
   coeffs.setZeros();
   Matrix<T> B(n,K);
   Matrix<T> A(K,K);
   // Per-thread scratch so the OpenMP coding loop is lock-free.
   SpVector<T>* spcoeffT=new SpVector<T>[NUM_THREADS];
   Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
   Vector<T>* coeffsoldT=new Vector<T>[NUM_THREADS];
   Matrix<T>* BT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* AT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      spcoeffT[i].resize(K);
      DtRT[i].resize(K);
      coeffsoldT[i].resize(K);
      BT[i].resize(n,K);
      BT[i].setZeros();
      AT[i].resize(K,K);
      AT[i].setZeros();
   }
   Timer time;
   time.start();
   srandom(0);
   Vector<int> perm;
   perm.randperm(M);          // random visiting order of the samples
   int JJ = J < 0 ? 100000000 : J;
   Vector<T> weights(M);      // per-sample visit weight (decayed each iter)
   weights.setZeros();
   for (int i = 0; i<JJ; ++i) {
      if (J < 0 && time.getElapsed() > T(-J)) break;
      _D.XtX(G);
      if (param.clean)
         this->cleanDict(X,G,param.posD,
               param.modeD,param.gamma1,param.gamma2);
      int j;
      // ---- Refine the codes of one mini-batch and update the per-thread
      // statistics with the difference between old and new codes. ----
#pragma omp parallel for private(j)
      for (j = 0; j<batch_size; ++j) {
#ifdef _OPENMP
         int numT=omp_get_thread_num();
#else
         int numT=0;
#endif
         const int ind=perm[(j+i*batch_size) % M];
         Vector<T> Xj, coeffj;
         SpVector<T>& spcoeffj = spcoeffT[numT];
         Vector<T>& DtRj = DtRT[numT];
         Vector<T>& oldcoeffj = coeffsoldT[numT];
         X.getData(Xj,ind);
         if (param.whiten) {
            if (param.pattern) {
               Vector<T> mean(4);
               Xj.whiten(mean,param.pattern);
            } else {
               Xj.whiten(X.V());
            }
         }
         coeffj.refCol was not used; coeffj references column ind of coeffs:
         coeffs.refCol(ind,coeffj);
         oldcoeffj.copy(coeffj);     // remember the previous code
         _D.multTrans(Xj,DtRj);
         // Warm-start residual correlations: DtR = D'x - G * alpha_old.
         coeffj.toSparse(spcoeffj);
         G.mult(spcoeffj,DtRj,T(-1.0),T(1.0));
         if (param.mode == PENALTY) {
            coreIST(G,DtRj,coeffj,param.lambda,200,T(1e-3));
         } else {
            T normX = Xj.nrm2sq();
            coreISTconstrained(G,DtRj,coeffj,normX,param.lambda,200,T(1e-3));
         }
         // A <- A - w_old * a_old a_old' + a_new a_new'
         oldcoeffj.toSparse(spcoeffj);
         AT[numT].rank1Update(spcoeffj,-weights[ind]);
         coeffj.toSparse(spcoeffj);
         AT[numT].rank1Update(spcoeffj);
         weights[ind]++;
         // B <- B - x (w_old * a_old - a_new)' (computed as one rank-1 update)
         oldcoeffj.scal(weights[ind]);
         oldcoeffj.sub(coeffj);
         oldcoeffj.toSparse(spcoeffj);
         BT[numT].rank1Update(Xj,spcoeffj,T(-1.0));
      }
      A.setZeros();
      B.setZeros();
      // Forgetting factor for the accumulated per-thread statistics.
      T scal;
      int totaliter=i;
      int ii = i;
      int num_elem=MIN(2*M, ii < batchsize ? ii*batchsize :
            batchsize*batchsize+ii-batchsize);
      if (param.modeParam == PARAM2) {
         scal=param.rho;
      } else if (param.modeParam == PARAM1) {
         scal=MAX(0.95,pow(T(totaliter)/T(totaliter+1),-param.rho));
      } else {
         scal = T(num_elem+1-
               batchsize)/T(num_elem+1);
      }
      for (j = 0; j<NUM_THREADS; ++j) {
         A.add(AT[j]);
         B.add(BT[j]);
         AT[j].scal(scal);
         BT[j].scal(scal);
      }
      weights.scal(scal);
      // Block-coordinate atom update: d_k <- proj(d_k + (b_k - D a_k)/A_kk);
      // atoms with negligible usage are zeroed when cleaning is enabled.
      Vector<T> di, ai,bi;
      Vector<T> newd(n);
      for (j = 0; j<param.iter_updateD; ++j) {
         for (int k = 0; k<K; ++k) {
            if (A[k*K+k] > 1e-6) {
               _D.refCol(k,di);
               A.refCol(k,ai);
               B.refCol(k,bi);
               _D.mult(ai,newd,T(-1.0));
               newd.add(bi);
               newd.scal(T(1.0)/A[k*K+k]);
               newd.add(di);
               if (param.modeD != L2) {
                  newd.sparseProject(di,T(1.0),
                        sparseD,param.gamma1,
                        param.gamma2,T(2.0),param.posD);
               } else {
                  if (param.posD) newd.thrsPos();
                  newd.normalize2();
                  di.copy(newd);
               }
            } else if (param.clean) {
               _D.refCol(k,di);
               di.setZeros();
            }
         }
      }
   }
   // Final clean-up pass on the learned dictionary.
   _D.XtX(G);
   if (param.clean)
      this->cleanDict(X,G,param.posD,param.modeD,
            param.gamma1,param.gamma2);
   time.printElapsed();
   delete[](spcoeffT);
   delete[](DtRT);
   delete[](AT);
   delete[](BT);
   delete[](coeffsoldT);
}
#endif
|
csr.c | /*!
* \file
*
* \brief Various routines with dealing with CSR matrices
*
* \author George Karypis
* \version\verbatim $Id: csr.c 18518 2015-03-15 19:24:53Z karypis $ \endverbatim
*/
#include <GKlib.h>
#define OMPMINOPS 50000
/*************************************************************************/
/*! Allocate memory for a CSR matrix and initializes it
\returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
/* Allocates a CSR matrix and resets it to the empty state
   (all array fields NULL, nrows/ncols set to -1 by gk_csr_Init). */
gk_csr_t *gk_csr_Create()
{
  gk_csr_t *mat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");

  gk_csr_Init(mat);

  return mat;
}
/*************************************************************************/
/*! Initializes the matrix
\param mat is the matrix to be initialized.
*/
/*************************************************************************/
/* Resets a CSR matrix to the empty state: every field zeroed/NULL,
   with the dimensions marked as unset (-1). */
void gk_csr_Init(gk_csr_t *mat)
{
  memset(mat, 0, sizeof(gk_csr_t));
  mat->nrows = -1;
  mat->ncols = -1;
}
/*************************************************************************/
/*! Frees all the memory allocated for matrix.
\param mat is the matrix to be freed.
*/
/*************************************************************************/
/* Releases a CSR matrix: frees all of its field arrays, then the struct
   itself. *mat is set to NULL by gk_free; a NULL *mat is a no-op. */
void gk_csr_Free(gk_csr_t **mat)
{
  if (*mat) {
    gk_csr_FreeContents(*mat);
    gk_free((void **)mat, LTERM);
  }
}
/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
sets them to NULL.
\param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
/* Frees every array field of the matrix and sets the pointers to NULL;
   the struct itself is kept (see gk_csr_Free for full disposal).
   Fix: the first argument is now cast to (void **) -- gk_free takes
   pointers-to-pointers (cf. the (void **) cast in gk_csr_Free); the old
   (void *) cast did not match that contract. */
void gk_csr_FreeContents(gk_csr_t *mat)
{
  gk_free((void **)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids,
          &mat->colptr, &mat->colind, &mat->colval, &mat->colids,
          &mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
          &mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
          &mat->rwgts, &mat->cwgts,
          LTERM);
}
/*************************************************************************/
/*! Returns a copy of a matrix.
\param mat is the matrix to be duplicated.
\returns the newly created copy of the matrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows  = mat->nrows;
  nmat->ncols  = mat->ncols;

  /* copy the row structure: each field is duplicated only if present.
     NOTE(review): only rowptr/rowids/rnorms/rowind/rowval (and the column
     counterparts) are duplicated -- rsums, rsizes, rvols, rwgts etc. are
     NOT copied; confirm that is intended. */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
                            gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
  if (mat->rowids)
    nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
                            gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
                            gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"));
  /* rowind/rowval sizes come from rowptr[nrows] (total nonzeros);
     these fields require rowptr to be present. */
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
                            gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
                            gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));

  /* copy the col structure (mirror of the row structure) */
  if (mat->colptr)
    nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
                            gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
  if (mat->colids)
    nmat->colids = gk_icopy(mat->ncols, mat->colids,
                            gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
  if (mat->cnorms)
    nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
                            gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
  if (mat->colind)
    nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
                            gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
  if (mat->colval)
    nmat->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
                            gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));

  return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containint a set of consecutive rows.
\param mat is the original matrix.
\param rstart is the starting row.
\param nrows is the number of rows from rstart to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
  ssize_t i;
  gk_csr_t *nmat;

  /* requested range must fit inside the matrix */
  if (rstart+nrows > mat->nrows)
    return NULL;

  nmat = gk_csr_Create();

  nmat->nrows  = nrows;
  nmat->ncols  = mat->ncols;

  /* copy the row structure; rowptr is rebased so nmat->rowptr[0] == 0.
     NOTE(review): if mat->rowptr were NULL the rebasing loop below would
     dereference a NULL nmat->rowptr -- callers appear to always provide
     the row structure; confirm. */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
                            gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
  /* rebase in descending order so rowptr[0] is subtracted last */
  for (i=nrows; i>=0; i--)
    nmat->rowptr[i] -= nmat->rowptr[0];
  ASSERT(nmat->rowptr[0] == 0);

  if (mat->rowids)
    nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
                            gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
                            gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));

  if (mat->rsums)
    nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
                            gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));

  ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);

  /* copy the nonzeros of the selected row window */
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowind+mat->rowptr[rstart],
                            gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowval+mat->rowptr[rstart],
                            gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowval"));

  return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containing a certain set of rows.
\param mat is the original matrix.
\param nrows is the number of rows to extract.
\param rind is the set of row numbers to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
/* Extracts the rows listed in rind (in that order) into a new matrix.
   Two passes: first count the nonzeros to size the arrays, then copy each
   selected row and rebuild rowptr as a running prefix sum.
   Fix: the allocation diagnostic strings used to say
   "gk_csr_ExtractPartition" (copy-paste from the sibling routine), which
   mislabels any allocation-failure message; they now name this function.
   NOTE(review): assumes mat->rowval is non-NULL, like the original. */
gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind)
{
  ssize_t i, ii, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = mat->ncols;

  /* pass 1: total nonzeros of the selected rows */
  for (nnz=0, i=0; i<nrows; i++)
    nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]];

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractRows: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractRows: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractRows: rowval");

  /* pass 2: copy rows back-to-back and record the prefix sums */
  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
    i = rind[ii];
    gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
    gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
    nnz += mat->rowptr[i+1]-mat->rowptr[i];
    nmat->rowptr[++j] = nnz;
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}
/*************************************************************************/
/*! Returns a submatrix corresponding to a specified partitioning of rows.
\param mat is the original matrix.
\param part is the partitioning vector of the rows.
\param pid is the partition ID that will be extracted.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
/* Extracts the rows whose partition id equals pid into a new matrix.
   Two passes over the rows: the first sizes the output (row count and
   nonzeros), the second copies the selected rows in their original order
   and rebuilds rowptr as a running prefix sum. */
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
  ssize_t i, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->ncols = mat->ncols;
  nmat->nrows = 0;

  /* pass 1: count rows and nonzeros belonging to partition pid */
  for (nnz=0, i=0; i<mat->nrows; i++) {
    if (part[i] != pid)
      continue;
    nmat->nrows++;
    nnz += mat->rowptr[i+1]-mat->rowptr[i];
  }

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");

  /* pass 2: copy the selected rows back-to-back */
  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
    if (part[i] != pid)
      continue;
    gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
    gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
    nnz += mat->rowptr[i+1]-mat->rowptr[i];
    nmat->rowptr[++j] = nnz;
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}
/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
color array.
\param mat is the original matrix.
\param color is an array of size equal to the number of non-zeros
in the matrix (row-wise structure). The matrix is split into
as many parts as the number of colors. For meaningfull results,
the colors should be numbered consecutively starting from 0.
\returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
  ssize_t i, j;
  int nrows, ncolors;
  ssize_t *rowptr;
  int *rowind;
  float *rowval;
  gk_csr_t **smats;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* number of output matrices = max color value + 1 */
  ncolors = gk_imax(rowptr[nrows], color, 1)+1;

  smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
  for (i=0; i<ncolors; i++) {
    smats[i] = gk_csr_Create();
    smats[i]->nrows  = mat->nrows;
    smats[i]->ncols  = mat->ncols;
    smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
  }

  /* pass 1: per-row nonzero counts for each color */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      smats[color[j]]->rowptr[i]++;
  }
  /* turn the counts into starting offsets (prefix sums) */
  for (i=0; i<ncolors; i++)
    MAKECSR(j, nrows, smats[i]->rowptr);

  for (i=0; i<ncolors; i++) {
    smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
    smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
  }

  /* pass 2: scatter each nonzero into its color's matrix; rowptr[i] is
     used as a running insertion cursor and is advanced past row i ... */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
      smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
      smats[color[j]]->rowptr[i]++;
    }
  }
  /* ... and SHIFTCSR undoes the cursor advance, restoring valid rowptrs */
  for (i=0; i<ncolors; i++)
    SHIFTCSR(j, nrows, smats[i]->rowptr);

  return smats;
}
/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it the matrix's
forward structure.
\param filename is the file that stores the data.
\param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO,
GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL
specifying the type of the input format.
The GK_CSR_FMT_CSR does not contain a header
line, whereas the GK_CSR_FMT_BINROW is a binary format written
by gk_csr_Write() using the same format specifier.
\param readvals is either 1 or 0, indicating if the CSR file contains
values or it does not. It only applies when GK_CSR_FMT_CSR is
used.
\param numbering is either 1 or 0, indicating if the numbering of the
indices start from 1 or 0, respectively. If they start from 1,
they are automatically decreamented during input so that they
will start from 0. It only applies when GK_CSR_FMT_CSR is
used.
\returns the matrix that was read.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering)
{
ssize_t i, k, l;
size_t nfields, nrows, ncols, nnz, fmt, ncon;
size_t lnlen;
ssize_t *rowptr;
int *rowind, *iinds, *jinds, ival;
float *rowval=NULL, *vals, fval;
int readsizes, readwgts;
char *line=NULL, *head, *tail, fmtstr[256];
FILE *fpin;
gk_csr_t *mat=NULL;
if (!gk_fexists(filename))
gk_errexit(SIGERR, "File %s does not exist!\n", filename);
switch (format) {
case GK_CSR_FMT_BINROW:
mat = gk_csr_Create();
fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr");
if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1)
gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind");
if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
if (readvals == 1) {
mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval");
if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
}
gk_fclose(fpin);
return mat;
break;
case GK_CSR_FMT_BINCOL:
mat = gk_csr_Create();
fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr");
if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1)
gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind");
if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
if (readvals) {
mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval");
if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
}
gk_fclose(fpin);
return mat;
break;
case GK_CSR_FMT_IJV:
gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);
if (readvals == 1 && 3*nrows != nnz)
gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not a multiple of 3.\n", nnz, readvals);
if (readvals == 0 && 2*nrows != nnz)
gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not a multiple of 2.\n", nnz, readvals);
nnz = nrows;
numbering = (numbering ? - 1 : 0);
/* read the data into three arrays */
iinds = gk_i32malloc(nnz, "iinds");
jinds = gk_i32malloc(nnz, "jinds");
vals = (readvals ? gk_fmalloc(nnz, "vals") : NULL);
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
for (nrows=0, ncols=0, i=0; i<nnz; i++) {
if (readvals) {
if (fscanf(fpin, "%d %d %f", &iinds[i], &jinds[i], &vals[i]) != 3)
gk_errexit(SIGERR, "Error: Failed to read (i, j, val) for nnz: %zd.\n", i);
}
else {
if (fscanf(fpin, "%d %d", &iinds[i], &jinds[i]) != 2)
gk_errexit(SIGERR, "Error: Failed to read (i, j) value for nnz: %zd.\n", i);
}
iinds[i] += numbering;
jinds[i] += numbering;
if (nrows < iinds[i])
nrows = iinds[i];
if (ncols < jinds[i])
ncols = jinds[i];
}
nrows++;
ncols++;
gk_fclose(fpin);
/* convert (i, j, v) into a CSR matrix */
mat = gk_csr_Create();
mat->nrows = nrows;
mat->ncols = ncols;
rowptr = mat->rowptr = gk_zsmalloc(nrows+1, 0, "rowptr");
rowind = mat->rowind = gk_i32malloc(nnz, "rowind");
if (readvals)
rowval = mat->rowval = gk_fmalloc(nnz, "rowval");
for (i=0; i<nnz; i++)
rowptr[iinds[i]]++;
MAKECSR(i, nrows, rowptr);
for (i=0; i<nnz; i++) {
rowind[rowptr[iinds[i]]] = jinds[i];
if (readvals)
rowval[rowptr[iinds[i]]] = vals[i];
rowptr[iinds[i]]++;
}
SHIFTCSR(i, nrows, rowptr);
gk_free((void **)&iinds, &jinds, &vals, LTERM);
return mat;
break;
case GK_CSR_FMT_BIJV:
mat = gk_csr_Create();
fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
if (fread(&nnz, sizeof(size_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nnz from file %s!\n", filename);
if (fread(&readvals, sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the readvals from file %s!\n", filename);
/* read the data into three arrays */
iinds = gk_i32malloc(nnz, "iinds");
jinds = gk_i32malloc(nnz, "jinds");
vals = (readvals ? gk_fmalloc(nnz, "vals") : NULL);
for (i=0; i<nnz; i++) {
if (fread(&(iinds[i]), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read iinds[i] from file %s!\n", filename);
if (fread(&(jinds[i]), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read jinds[i] from file %s!\n", filename);
if (readvals) {
if (fread(&(vals[i]), sizeof(float), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read vals[i] from file %s!\n", filename);
}
//printf("%d %d\n", iinds[i], jinds[i]);
}
gk_fclose(fpin);
/* convert (i, j, v) into a CSR matrix */
rowptr = mat->rowptr = gk_zsmalloc(mat->nrows+1, 0, "rowptr");
rowind = mat->rowind = gk_i32malloc(nnz, "rowind");
if (readvals)
rowval = mat->rowval = gk_fmalloc(nnz, "rowval");
for (i=0; i<nnz; i++)
rowptr[iinds[i]]++;
MAKECSR(i, mat->nrows, rowptr);
for (i=0; i<nnz; i++) {
rowind[rowptr[iinds[i]]] = jinds[i];
if (readvals)
rowval[rowptr[iinds[i]]] = vals[i];
rowptr[iinds[i]]++;
}
SHIFTCSR(i, mat->nrows, rowptr);
gk_free((void **)&iinds, &jinds, &vals, LTERM);
return mat;
break;
/* the following are handled by a common input code, that comes after the switch */
case GK_CSR_FMT_CLUTO:
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
do {
if (gk_getline(&line, &lnlen, fpin) <= 0)
gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
} while (line[0] == '%');
if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
gk_errexit(SIGERR, "Header line must contain 3 integers.\n");
readsizes = 0;
readwgts = 0;
readvals = 1;
numbering = 1;
break;
case GK_CSR_FMT_METIS:
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
do {
if (gk_getline(&line, &lnlen, fpin) <= 0)
gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
} while (line[0] == '%');
fmt = ncon = 0;
nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
if (nfields < 2)
gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");
ncols = nrows;
nnz *= 2;
if (fmt > 111)
gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);
sprintf(fmtstr, "%03zu", fmt%1000);
readsizes = (fmtstr[0] == '1');
readwgts = (fmtstr[1] == '1');
readvals = (fmtstr[2] == '1');
numbering = 1;
ncon = (ncon == 0 ? 1 : ncon);
break;
case GK_CSR_FMT_CSR:
readsizes = 0;
readwgts = 0;
gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);
if (readvals == 1 && nnz%2 == 1)
gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals);
if (readvals == 1)
nnz = nnz/2;
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
break;
default:
gk_errexit(SIGERR, "Unknown csr format.\n");
return NULL;
}
mat = gk_csr_Create();
mat->nrows = nrows;
rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr");
rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind");
if (readvals != 2)
rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval");
if (readsizes)
mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes");
if (readwgts)
mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts");
/*----------------------------------------------------------------------
* Read the sparse matrix file
*---------------------------------------------------------------------*/
numbering = (numbering ? -1 : 0);
for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) {
do {
if (gk_getline(&line, &lnlen, fpin) == -1)
gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
} while (line[0] == '%');
head = line;
tail = NULL;
/* Read vertex sizes */
if (readsizes) {
#ifdef __MSC__
mat->rsizes[i] = (float)strtod(head, &tail);
#else
mat->rsizes[i] = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
if (mat->rsizes[i] < 0)
errexit("The size for vertex %zd must be >= 0\n", i+1);
head = tail;
}
/* Read vertex weights */
if (readwgts) {
for (l=0; l<ncon; l++) {
#ifdef __MSC__
mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
if (tail == head)
errexit("The line for vertex %zd does not have enough weights "
"for the %d constraints.\n", i+1, ncon);
if (mat->rwgts[i*ncon+l] < 0)
errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
head = tail;
}
}
/* Read the rest of the row */
while (1) {
ival = (int)strtol(head, &tail, 0);
if (tail == head)
break;
head = tail;
if ((rowind[k] = ival + numbering) < 0)
gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);
ncols = gk_max(rowind[k], ncols);
if (readvals == 1) {
#ifdef __MSC__
fval = (float)strtod(head, &tail);
#else
fval = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
head = tail;
rowval[k] = fval;
}
k++;
}
rowptr[i+1] = k;
}
if (format == GK_CSR_FMT_METIS) {
ASSERT(ncols+1 == mat->nrows);
mat->ncols = mat->nrows;
}
else {
mat->ncols = ncols+1;
}
if (k != nnz)
gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in "
"the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);
gk_fclose(fpin);
gk_free((void **)&line, LTERM);
return mat;
}
/**************************************************************************/
/*! Writes the row-based structure of a matrix into a file.
\param mat is the matrix to be written,
\param filename is the name of the output file.
\param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR,
GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL, GK_CSR_FMT_BIJV.
\param writevals is either 1 or 0 indicating if the values will be
written or not. This is only applicable when GK_CSR_FMT_CSR
is used.
\param numbering is either 1 or 0 indicating if the internal 0-based
numbering will be shifted by one or not during output. This
is only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
  ssize_t i, j;
  int32_t edge[2];   /* scratch (row, col) record for the binary IJV format */
  FILE *fpout;

  /* NOTE(review): fwrite return values are not checked in the binary
     branches, so short writes / disk-full conditions go undetected. */
  switch (format) {
    case GK_CSR_FMT_METIS:
      /* METIS graph format: "<#vtxs> <#edges>" header, then one line per
         vertex listing its 1-based neighbors. Each undirected edge is
         stored twice, hence the square-matrix / even-nnz requirement. */
      if (mat->nrows != mat->ncols || mat->rowptr[mat->nrows]%2 == 1)
        gk_errexit(SIGERR, "METIS output format requires a square symmetric matrix.\n");
      if (filename)
        fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
      else
        fpout = stdout;   /* text formats fall back to stdout without a filename */
      fprintf(fpout, "%d %zd\n", mat->nrows, mat->rowptr[mat->nrows]/2);
      for (i=0; i<mat->nrows; i++) {
        for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++)
          fprintf(fpout, " %d", mat->rowind[j]+1);   /* shift to 1-based ids */
        fprintf(fpout, "\n");
      }
      if (filename)
        gk_fclose(fpout);
      break;

    case GK_CSR_FMT_BINROW:
      /* raw binary dump of the row view:
         nrows, ncols, rowptr[nrows+1], rowind[nnz][, rowval[nnz]] */
      if (filename == NULL)
        gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
      fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");
      fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
      fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
      fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout);
      fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout);
      if (writevals)
        fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout);
      gk_fclose(fpout);
      return;
      break;

    case GK_CSR_FMT_BINCOL:
      /* same layout as BINROW but for the column view; the column-based
         structure must already exist (see gk_csr_CreateIndex) */
      if (filename == NULL)
        gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
      fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");
      fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
      fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
      fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout);
      fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout);
      if (writevals)
        fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout);
      gk_fclose(fpout);
      return;
      break;

    case GK_CSR_FMT_IJV:
      /* text triplets: one "<i> <j>[ <val>]" line per nonzero, optionally
         shifted to 1-based numbering */
      if (filename == NULL)
        gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
      fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
      numbering = (numbering ? 1 : 0);
      for (i=0; i<mat->nrows; i++) {
        for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
          if (writevals)
            fprintf(fpout, "%zd %d %.8f\n", i+numbering, mat->rowind[j]+numbering, mat->rowval[j]);
          else
            fprintf(fpout, "%zd %d\n", i+numbering, mat->rowind[j]+numbering);
        }
      }
      gk_fclose(fpout);
      return;
      break;

    case GK_CSR_FMT_BIJV:
      /* binary triplets: nrows, ncols, nnz, writevals flag, then one
         (int32 i, int32 j[, float v]) record per nonzero -- the mirror
         image of the BIJV branch of gk_csr_Read */
      if (filename == NULL)
        gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
      fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");
      fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
      fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
      fwrite(&(mat->rowptr[mat->nrows]), sizeof(size_t), 1, fpout);
      fwrite(&writevals, sizeof(int32_t), 1, fpout);
      for (i=0; i<mat->nrows; i++) {
        edge[0] = i;
        for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
          edge[1] = mat->rowind[j];
          fwrite(edge, sizeof(int32_t), 2, fpout);
          if (writevals)
            fwrite(&(mat->rowval[j]), sizeof(float), 1, fpout);
        }
      }
      gk_fclose(fpout);
      return;
      break;

    default:
      /* CSR/CLUTO-style text output: one line per row of
         "<col>[ <val>]" pairs; CLUTO adds a header line and forces
         values plus 1-based numbering */
      if (filename)
        fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
      else
        fpout = stdout;
      if (format == GK_CSR_FMT_CLUTO) {
        fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
        writevals = 1;
        numbering = 1;
      }
      for (i=0; i<mat->nrows; i++) {
        for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
          fprintf(fpout, " %d", mat->rowind[j]+(numbering ? 1 : 0));
          if (writevals)
            fprintf(fpout, " %f", mat->rowval[j]);
        }
        fprintf(fpout, "\n");
      }
      if (filename)
        gk_fclose(fpout);
  }
}
/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The pruning is driven by the
    row structure of the matrix and works by removing rows/columns; it does
    not affect the numbering of the remaining rows/columns.
    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
           of the matrix will be pruned,
    \param minf is the minimum number of rows (columns) that a column (row) must
           be present in in order to be kept,
    \param maxf is the maximum number of rows (columns) that a column (row) must
           be present in in order to be kept.
    \returns the pruned matrix consisting only of its row-based structure.
            The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf)
{
  ssize_t i, j, nnz;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind, *collen;
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();
  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* the result is sized for the worst case (nothing pruned) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind");
  /* fix: a pattern-only matrix (rowval == NULL) previously caused a NULL
     dereference in the copy loops below; propagate the absence of values */
  nrowval = nmat->rowval =
      (rowval ? gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval") : NULL);

  switch (what) {
    case GK_CSR_COL:
      /* count, for every column, the number of rows it appears in */
      collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen");
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          ASSERT(rowind[j] < ncols);
          collen[rowind[j]]++;
        }
      }
      /* turn the counts into keep(1)/drop(0) flags */
      for (i=0; i<ncols; i++)
        collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);

      /* copy only the entries whose column survived; column ids are kept */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (collen[rowind[j]]) {
            nrowind[nnz] = rowind[j];
            if (nrowval)
              nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&collen, LTERM);
      break;

    case GK_CSR_ROW:
      /* keep only the rows whose length falls within [minf, maxf];
         dropped rows become empty but retain their row ids */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
            nrowind[nnz] = rowind[j];
            if (nrowval)
              nrowval[nnz] = rowval[j];
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight entries whose
sum accounts for a certain fraction of the overall weight of the
row/column.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be pruned,
\param norm indicates the norm that will be used to aggregate the weights
and possible values are 1 or 2,
\param fraction is the fraction of the overall norm that will be retained
by the kept entries.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction)
{
  ssize_t i, j, nnz;
  int nrows, ncols, ncand, maxlen=0;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval, rsum, tsum;
  gk_csr_t *nmat;
  gk_fkv_t *cand;   /* per-thread (weight, index) scratch used for sorting */

  nmat = gk_csr_Create();
  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;   /* NOTE(review): assumed non-NULL; a pattern-only matrix would crash below */
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;
  /* the filtered result is always produced as a row-based structure,
     sized for the worst case (nothing filtered) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");
  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");
      /* nrowptr starts as a copy of rowptr; as kept column entries are
         scattered back into their owning rows it tracks each row's
         current fill position (turned back into offsets by SHIFTCSR) */
      gk_zcopy(nrows+1, rowptr, nrowptr);
      for (i=0; i<ncols; i++)
        maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);
      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");
        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++) {
          /* gather column i and its total weight (1-norm or squared 2-norm) */
          for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
            cand[ncand].val = colind[j];
            cand[ncand].key = colval[j];
            tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
          }
          gk_fkvsortd(ncand, cand);   /* heaviest entries first */
          /* keep the heaviest prefix whose running weight stays within
             fraction*tsum, scattering each kept entry into its row.
             NOTE(review): columns sharing a row update nrowptr[row] from
             different threads without synchronization -- looks like a
             data race; confirm against upstream usage. */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[nrowptr[cand[j].val]] = i;
            nrowval[nrowptr[cand[j].val]] = cand[j].key;
            nrowptr[cand[j].val]++;
          }
        }
        gk_free((void **)&cand, LTERM);
      }
      /* compact the nrowind/nrowval */
      /* each row's kept entries currently sit at the front of its original
         segment; slide them together so the structure becomes contiguous */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);   /* end-offsets -> standard rowptr */
      break;
    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");
      for (i=0; i<nrows; i++)
        maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);
      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");
        #pragma omp for schedule(static)
        for (i=0; i<nrows; i++) {
          /* gather row i and its total weight */
          for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
            cand[ncand].val = rowind[j];
            cand[ncand].key = rowval[j];
            tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
          }
          gk_fkvsortd(ncand, cand);
          /* keep the heaviest prefix; rows are independent, so threads
             write to disjoint segments here */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[rowptr[i]+j] = cand[j].val;
            nrowval[rowptr[i]+j] = cand[j].key;
          }
          nrowptr[i+1] = rowptr[i]+j;   /* end of row i's kept entries */
        }
        gk_free((void **)&cand, LTERM);
      }
      /* compact nrowind/nrowval */
      nrowptr[0] = nnz = 0;
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i+1] = nnz;
      }
      break;
    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }
  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight top-K entries
along each row/column and those entries whose weight is greater than
a specified value.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be pruned,
\param topk is the number of the highest weight entries to keep.
\param keepval is the weight of a term above which will be kept. This
is used to select additional terms past the first topk.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
  ssize_t i, j, k, nnz;
  int nrows, ncols, ncand;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval;
  gk_csr_t *nmat;
  gk_fkv_t *cand;   /* (weight, index) scratch for sorting one row/column */

  nmat = gk_csr_Create();
  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;   /* NOTE(review): assumed non-NULL; pattern-only matrices would crash below */
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;
  /* the filtered result is always produced as a row-based structure;
     NOTE(review): the "gk_csr_LowFilter" tags below look copy-pasted from
     that routine -- harmless, but misleading in OOM messages */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");
  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");
      cand = gk_fkvmalloc(nrows, "gk_csr_LowFilter: cand");
      /* nrowptr starts as a copy of rowptr and tracks each row's fill
         position as kept column entries are scattered into their rows */
      gk_zcopy(nrows+1, rowptr, nrowptr);
      for (i=0; i<ncols; i++) {
        /* gather column i sorted by decreasing weight */
        for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
          cand[ncand].val = colind[j];
          cand[ncand].key = colval[j];
        }
        gk_fkvsortd(ncand, cand);
        /* the topk heaviest entries are kept unconditionally... */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++) {
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
        /* ...and the remainder only while their weight stays >= keepval */
        for (; j<ncand; j++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
      }
      /* compact the nrowind/nrowval */
      /* kept entries sit at the front of each row's original segment;
         slide them together, then SHIFTCSR turns end-offsets into rowptr */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);
      gk_free((void **)&cand, LTERM);
      break;
    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");
      cand = gk_fkvmalloc(ncols, "gk_csr_LowFilter: cand");
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        /* gather row i sorted by decreasing weight */
        for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
          cand[ncand].val = rowind[j];
          cand[ncand].key = rowval[j];
        }
        gk_fkvsortd(ncand, cand);
        /* topk heaviest kept unconditionally, rest while >= keepval */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++, nnz++) {
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        for (; j<ncand; j++, nnz++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&cand, LTERM);
      break;
    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }
  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the terms whose contribution to
the total length of the document is greater than a user-supplied multiple
over the average.
This routine assumes that the vectors are normalized to be unit length.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be pruned,
\param zscore is the multiplicative factor over the average contribution
to the length of the document.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore)
{
  ssize_t i, j, nkept;
  int nrows;
  ssize_t *rptr, *nptr;
  int *rind, *nind;
  float *rval, *nval, cutoff;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();
  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  nrows = mat->nrows;
  rptr  = mat->rowptr;
  rind  = mat->rowind;
  rval  = mat->rowval;

  /* the result is sized for the worst case (nothing filtered) */
  nptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr");
  nind = nmat->rowind = gk_imalloc(rptr[nrows], "gk_csr_ZScoreFilter: nrowind");
  nval = nmat->rowval = gk_fmalloc(rptr[nrows], "gk_csr_ZScoreFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      gk_errexit(SIGERR, "This has not been implemented yet.\n");
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      nptr[0] = 0;
      for (nkept=0, i=0; i<nrows; i++) {
        /* keep only entries exceeding zscore times the mean per-entry
           contribution of a unit-length row */
        cutoff = zscore/(rptr[i+1]-rptr[i]);
        for (j=rptr[i]; j<rptr[i+1]; j++) {
          if (rval[j] <= cutoff)
            continue;   /* low-contribution entry: drop it */
          nind[nkept] = rind[j];
          nval[nkept] = rval[j];
          nkept++;
        }
        nptr[i+1] = nkept;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns.
As a result of the compaction, the column numbers are renumbered.
The compaction operation is done in place and only affects the row-based
representation of the matrix.
The new columns are ordered in decreasing frequency.
\param mat the matrix whose empty columns will be removed.
*/
/**************************************************************************/
void gk_csr_CompactColumns(gk_csr_t *mat)
{
  ssize_t i, nnz;
  int nrows, ncols, nactive;
  ssize_t *rowptr;
  int *rowind, *remap;
  gk_ikv_t *freqs;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;

  remap = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap");
  freqs = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens");

  /* count how many nonzeros reference each column */
  for (i=0; i<ncols; i++) {
    freqs[i].key = 0;
    freqs[i].val = i;
  }
  nnz = rowptr[nrows];
  for (i=0; i<nnz; i++)
    freqs[rowind[i]].key++;

  /* order columns by decreasing frequency; empty columns sink to the end */
  gk_ikvsortd(ncols, freqs);

  /* assign new ids to the non-empty columns in frequency order */
  for (nactive=0, i=0; i<ncols && freqs[i].key > 0; i++)
    remap[freqs[i].val] = nactive++;

  /* rewrite the column indices in place */
  for (i=0; i<nnz; i++)
    rowind[i] = remap[rowind[i]];

  mat->ncols = nactive;
  gk_free((void **)&remap, &freqs, LTERM);
}
/*************************************************************************/
/*! Sorts the indices in increasing order
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of
indices to sort.
*/
/**************************************************************************/
void gk_csr_SortIndices(gk_csr_t *mat, int what)
{
  int n, nn=0;
  ssize_t *ptr;
  int *ind;
  float *val;

  /* select the row or column view to be sorted */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      n   = mat->nrows;
      ptr = mat->rowptr;
      ind = mat->rowind;
      val = mat->rowval;
      break;
    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      n   = mat->ncols;
      ptr = mat->colptr;
      ind = mat->colind;
      val = mat->colval;
      break;
    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  #pragma omp parallel if (n > 100)
  {
    ssize_t i, j, k;
    gk_ikv_t *cand;
    float *tval;

    /* nn = maximum row/column length; "single" has an implied barrier so
       every thread sees the final value before allocating scratch */
    #pragma omp single
    for (i=0; i<n; i++)
      nn = gk_max(nn, ptr[i+1]-ptr[i]);

    cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand");
    /* fix: a pattern-only matrix (val == NULL) previously caused a NULL
       dereference when staging values; skip the value shuffle instead */
    tval = (val ? gk_fmalloc(nn, "gk_csr_SortIndices: tval") : NULL);

    #pragma omp for schedule(static)
    for (i=0; i<n; i++) {
      /* stage row/column i into cand (and tval), noting any inversion */
      for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
        if (j > ptr[i] && ind[j] < ind[j-1])
          k = 1; /* an inversion */
        cand[j-ptr[i]].val = j-ptr[i];
        cand[j-ptr[i]].key = ind[j];
        if (tval)
          tval[j-ptr[i]] = val[j];
      }
      /* sort (and permute the values accordingly) only if out of order */
      if (k) {
        gk_ikvsorti(ptr[i+1]-ptr[i], cand);
        for (j=ptr[i]; j<ptr[i+1]; j++) {
          ind[j] = cand[j-ptr[i]].key;
          if (tval)
            val[j] = tval[cand[j-ptr[i]].val];
        }
      }
    }
    gk_free((void **)&cand, &tval, LTERM);
  }
}
/*************************************************************************/
/*! Creates a row/column index from the column/row data.
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which index
will be created.
*/
/**************************************************************************/
void gk_csr_CreateIndex(gk_csr_t *mat, int what)
{
  /* 'f' stands for forward, 'r' stands for reverse */
  ssize_t i, j, k, nf, nr;
  ssize_t *fptr, *rptr;
  int *find, *rind;
  float *fval, *rval;

  /* pick the existing (forward) view and (re)allocate the reverse one;
     any previously-built reverse view is discarded */
  switch (what) {
    case GK_CSR_COL:
      nf   = mat->nrows;
      fptr = mat->rowptr;
      find = mat->rowind;
      fval = mat->rowval;
      if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
      if (mat->colind) gk_free((void **)&mat->colind, LTERM);
      if (mat->colval) gk_free((void **)&mat->colval, LTERM);
      nr   = mat->ncols;
      rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;
    case GK_CSR_ROW:
      nf   = mat->ncols;
      fptr = mat->colptr;
      find = mat->colind;
      fval = mat->colval;
      if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
      if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
      if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);
      nr   = mat->nrows;
      rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->rowval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;
    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  /* counting sort: histogram the reverse keys... */
  for (i=0; i<nf; i++) {
    for (j=fptr[i]; j<fptr[i+1]; j++)
      rptr[find[j]]++;
  }
  MAKECSR(i, nr, rptr);   /* ...and prefix-sum them into start offsets */

  /* the fills below advance rptr destructively; SHIFTCSR restores it.
     When the average reverse length exceeds 6, do two separate passes
     (indices, then values) -- presumably for better cache behavior */
  if (rptr[nr] > 6*nr) {
    for (i=0; i<nf; i++) {
      for (j=fptr[i]; j<fptr[i+1]; j++)
        rind[rptr[find[j]]++] = i;
    }
    SHIFTCSR(i, nr, rptr);

    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rval[rptr[find[j]]++] = fval[j];
      }
      SHIFTCSR(i, nr, rptr);
    }
  }
  else {
    /* short reverse lists: fill indices and values in one fused pass */
    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++) {
          k = find[j];
          rind[rptr[k]] = i;
          rval[rptr[k]++] = fval[j];
        }
      }
    }
    else {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rind[rptr[find[j]]++] = i;
      }
    }
    SHIFTCSR(i, nr, rptr);
  }
}
/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit
length.
\param mat the matrix itself,
\param what indicates what will be normalized and is obtained by
specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
\param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm
*/
/**************************************************************************/
void gk_csr_Normalize(gk_csr_t *mat, int what, int norm)
{
  ssize_t i, j;
  int nvecs;
  ssize_t *vptr;
  float *vval, w;

  /* normalize the row view, if requested and present */
  if ((what&GK_CSR_ROW) && mat->rowval) {
    nvecs = mat->nrows;
    vptr  = mat->rowptr;
    vval  = mat->rowval;

    #pragma omp parallel for if (vptr[nvecs] > OMPMINOPS) private(j,w) schedule(static)
    for (i=0; i<nvecs; i++) {
      w = 0.0;
      if (norm == 2) {
        for (j=vptr[i]; j<vptr[i+1]; j++)
          w += vval[j]*vval[j];
        if (w > 0)
          w = 1.0/sqrt(w);
      }
      else if (norm == 1) {
        for (j=vptr[i]; j<vptr[i+1]; j++)
          w += vval[j]; /* assume vval[j] > 0 */
        if (w > 0)
          w = 1.0/w;
      }
      /* an unrecognized norm (or an all-zero row) scales everything by 0 */
      for (j=vptr[i]; j<vptr[i+1]; j++)
        vval[j] *= w;
    }
  }

  /* same procedure for the column view */
  if ((what&GK_CSR_COL) && mat->colval) {
    nvecs = mat->ncols;
    vptr  = mat->colptr;
    vval  = mat->colval;

    #pragma omp parallel for if (vptr[nvecs] > OMPMINOPS) private(j,w) schedule(static)
    for (i=0; i<nvecs; i++) {
      w = 0.0;
      if (norm == 2) {
        for (j=vptr[i]; j<vptr[i+1]; j++)
          w += vval[j]*vval[j];
        if (w > 0)
          w = 1.0/sqrt(w);
      }
      else if (norm == 1) {
        for (j=vptr[i]; j<vptr[i+1]; j++)
          w += vval[j]; /* assume vval[j] > 0 */
        if (w > 0)
          w = 1.0/w;
      }
      for (j=vptr[i]; j<vptr[i+1]; j++)
        vval[j] *= w;
    }
  }
}
/*************************************************************************/
/*! Applies different row scaling methods.
\param mat the matrix itself,
\param type indicates the type of row scaling. Possible values are:
GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2.
*/
/**************************************************************************/
void gk_csr_Scale(gk_csr_t *mat, int type)
{
  ssize_t i, j;
  int nrows, ncols, nnzcols, bgfreq;
  ssize_t *rowptr;
  int *rowind, *collen;
  float *rowval, *cscale, maxtf;
  double logscale = 1.0/log(2.0);   /* converts natural log to log base 2 */

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  switch (type) {
    case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
      /* per-row scaling by the largest absolute value in the row */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j, maxtf) schedule(static)
      for (i=0; i<nrows; i++) {
        maxtf = fabs(rowval[rowptr[i]]);
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          rowval[j] = .5 + .5*rowval[j]/maxtf;
      }
      break;
    case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j, maxtf) schedule(static)
      for (i=0; i<nrows; i++) {
        maxtf = fabs(rowval[rowptr[i]]);
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          rowval[j] = .1 + .9*rowval[j]/maxtf;
      }
      break;
    case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
      /* sign() keeps the original sign while the magnitude is damped */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
        }
      }
      break;
    case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
      /* sqrt(sqrt(x)) == x^.25 */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
        }
      }
      break;
    case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
        }
      }
      break;
    case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
        }
      }
      break;
    case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
        }
      }
      break;
    case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
      /* operates directly on the flat nonzero array; the sign of the
         original value is preserved through the two log branches */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) schedule(static,32)
      for (i=0; i<rowptr[nrows]; i++) {
        if (rowval[i] != 0.0)
          rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
      }
#ifdef XXX
      /* retired row-wise variant of the same scaling, kept for reference */
      #pragma omp parallel for private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale;
          //rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
        }
      }
#endif
      break;
    case GK_CSR_IDF: /* TF' = TF*IDF */
      /* collen[c] = document frequency of column c; cscale = log(N/df) */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }
      #pragma omp parallel for if (ncols > OMPMINOPS) schedule(static)
      for (i=0; i<ncols; i++)
        cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          rowval[j] *= cscale[rowind[j]];
      }
      gk_free((void **)&cscale, &collen, LTERM);
      break;
    case GK_CSR_IDF2: /* TF' = TF*IDF */
      /* IDF smoothed with a background frequency derived from the average
         column occupancy (at least 10) */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }
      nnzcols = 0;
      #pragma omp parallel for if (ncols > OMPMINOPS) schedule(static) reduction(+:nnzcols)
      for (i=0; i<ncols; i++)
        nnzcols += (collen[i] > 0 ? 1 : 0);
      /* NOTE(review): nnzcols == 0 (an empty matrix) would divide by zero here */
      bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
      printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);
      #pragma omp parallel for if (ncols > OMPMINOPS) schedule(static)
      for (i=0; i<ncols; i++)
        cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          rowval[j] *= cscale[rowind[j]];
      }
      gk_free((void **)&cscale, &collen, LTERM);
      break;
    default:
      gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
  }
}
/*************************************************************************/
/*! Computes the sums of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
sums to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
  ssize_t i;
  int nvecs;
  ssize_t *ptr;
  float *val, *sums;

  /* Select the row- or column-based view of the matrix and (re)allocate the
     corresponding sums array on the matrix itself. */
  if (what == GK_CSR_ROW) {
    nvecs = mat->nrows;
    ptr   = mat->rowptr;
    val   = mat->rowval;
    if (mat->rsums)
      gk_free((void **)&mat->rsums, LTERM);
    sums = mat->rsums = gk_fsmalloc(nvecs, 0, "gk_csr_ComputeSums: sums");
  }
  else if (what == GK_CSR_COL) {
    nvecs = mat->ncols;
    ptr   = mat->colptr;
    val   = mat->colval;
    if (mat->csums)
      gk_free((void **)&mat->csums, LTERM);
    sums = mat->csums = gk_fsmalloc(nvecs, 0, "gk_csr_ComputeSums: sums");
  }
  else {
    gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
    return;
  }

  /* one gk_fsum per row/column; parallelize only when there is enough work */
  #pragma omp parallel for if (ptr[nvecs] > OMPMINOPS) schedule(static)
  for (i=0; i<nvecs; i++)
    sums[i] = gk_fsum(ptr[i+1]-ptr[i], val+ptr[i], 1);
}
/*************************************************************************/
/*! Computes the norms of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
squared norms to compute.
\note If the rowval/colval arrays are NULL, the matrix is assumed
to be binary and the norms are computed accordingly.
*/
/**************************************************************************/
void gk_csr_ComputeNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  /* select the row- or column-based view and (re)allocate the norms array */
  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);

      /* FIX: allocation tag previously said "gk_csr_ComputeSums" (copy-paste) */
      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeNorms: norms");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);

      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeNorms: norms");
      break;
    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  if (val) {
    /* 2-norm of each row/column */
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      norms[i] = sqrt(gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1));
  }
  else {
    /* binary matrix: the 2-norm is sqrt(#nonzeros) */
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      norms[i] = sqrt(ptr[i+1]-ptr[i]);
  }
}
/*************************************************************************/
/*! Computes the squared of the norms of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
squared norms to compute.
\note If the rowval/colval arrays are NULL, the matrix is assumed
to be binary and the norms are computed accordingly.
*/
/**************************************************************************/
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  /* select the row- or column-based view and (re)allocate the norms array */
  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);

      /* FIX: allocation tag previously said "gk_csr_ComputeSums" (copy-paste) */
      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);

      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;
    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  if (val) {
    /* squared 2-norm of each row/column (no sqrt, unlike gk_csr_ComputeNorms) */
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
  }
  else {
    /* binary matrix: the squared norm is the number of nonzeros */
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      norms[i] = ptr[i+1]-ptr[i];
  }
}
/*************************************************************************/
/*! Returns a new matrix whose rows/columns are shuffled.
\param mat the matrix to be shuffled,
\param what indicates if the rows (GK_CSR_ROW), columns (GK_CSR_COL),
or both (GK_CSR_ROWCOL) will be shuffled,
\param symmetric indicates if the same shuffling will be applied to
both rows and columns. This is valid with nrows==ncols and
GK_CSR_ROWCOL was specified.
\returns the shuffled matrix.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Shuffle(gk_csr_t *mat, int what, int symmetric)
{
  ssize_t i, j;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind;
  int *rperm, *cperm;
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  /* a symmetric row+column shuffle only makes sense for a square matrix */
  if (what == GK_CSR_ROWCOL && symmetric && mat->nrows != mat->ncols)
    gk_errexit(SIGERR, "The matrix is not square for a symmetric rowcol shuffling.\n");

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  rperm = gk_imalloc(nrows, "gk_csr_Shuffle: rperm");
  cperm = gk_imalloc(ncols, "gk_csr_Shuffle: cperm");

  /* Build the row/column permutations. The axis not being shuffled gets the
     identity permutation; the shuffled axis is permuted once with
     re-initialization (flag=1) and then scrambled 20 more times (flag=0). */
  switch (what) {
    case GK_CSR_ROW:
      gk_RandomPermute(nrows, rperm, 1);
      for (i=0; i<20; i++)
        gk_RandomPermute(nrows, rperm, 0);

      for (i=0; i<ncols; i++)
        cperm[i] = i;   /* columns keep their identity */
      break;

    case GK_CSR_COL:
      gk_RandomPermute(ncols, cperm, 1);
      for (i=0; i<20; i++)
        gk_RandomPermute(ncols, cperm, 0);

      for (i=0; i<nrows; i++)
        rperm[i] = i;   /* rows keep their identity */
      break;

    case GK_CSR_ROWCOL:
      gk_RandomPermute(nrows, rperm, 1);
      for (i=0; i<20; i++)
        gk_RandomPermute(nrows, rperm, 0);

      if (symmetric)
        gk_icopy(nrows, rperm, cperm);  /* same permutation on both axes */
      else {
        gk_RandomPermute(ncols, cperm, 1);
        for (i=0; i<20; i++)
          gk_RandomPermute(ncols, cperm, 0);
      }
      break;

    default:
      gk_free((void **)&rperm, &cperm, LTERM);
      gk_errexit(SIGERR, "Unknown shuffling type of %d\n", what);
      return NULL;
  }

  /* allocate the shuffled matrix; values are carried over only if present */
  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = ncols;

  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Shuffle: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Shuffle: nrowind");
  nrowval = nmat->rowval = (rowval ? gk_fmalloc(rowptr[nrows], "gk_csr_Shuffle: nrowval") : NULL) ;

  /* bucket counts: new row rperm[i] has the size of old row i */
  for (i=0; i<nrows; i++)
    nrowptr[rperm[i]] = rowptr[i+1]-rowptr[i];
  MAKECSR(i, nrows, nrowptr);  /* counts -> starting offsets */

  /* scatter each old row into its new position, remapping the column ids;
     nrowptr[] is advanced as entries are placed ... */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      nrowind[nrowptr[rperm[i]]] = cperm[rowind[j]];
      if (nrowval)
        nrowval[nrowptr[rperm[i]]] = rowval[j];
      nrowptr[rperm[i]]++;
    }
  }
  SHIFTCSR(i, nrows, nrowptr);  /* ... and shifted back to row starts here */

  gk_free((void **)&rperm, &cperm, LTERM);

  return nmat;
}
/*************************************************************************/
/*! Computes the similarity between two rows/columns
\param mat the matrix itself. The routine assumes that the indices
are sorted in increasing order.
\param i1 is the first row/column,
\param i2 is the second row/column,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
objects between the similarity will be computed,
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what, int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  /* select row- or column-based views of the two vectors being compared */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
      nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
      ind1  = mat->rowind + mat->rowptr[i1];
      ind2  = mat->rowind + mat->rowptr[i2];
      val1  = mat->rowval + mat->rowptr[i1];
      val2  = mat->rowval + mat->rowptr[i2];
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      nind1 = mat->colptr[i1+1]-mat->colptr[i1];
      nind2 = mat->colptr[i2+1]-mat->colptr[i2];
      ind1  = mat->colind + mat->colptr[i1];
      ind2  = mat->colind + mat->colptr[i2];
      val1  = mat->colval + mat->colptr[i1];
      val2  = mat->colval + mat->colptr[i2];
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  /* i1/i2 are reused below as cursors into the two (sorted) index lists */
  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      /* BUGFIX: the loop condition used to be (i1<nind1 && i2<nind2), which
         made the i1==nind1 / i2==nind2 branches unreachable and silently
         dropped the tail of the longer vector from stat1/stat2. With ||
         both vectors are fully drained, so the norms are complete. */
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {            /* vector 1 exhausted; drain vector 2 */
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {       /* vector 2 exhausted; drain vector 1 */
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {                        /* common index: contributes to the dot product */
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_COS)
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else  /* extended Jaccard: <v1,v2> / (|v1|^2 + |v2|^2 - <v1,v2>) */
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
    case GK_CSR_AMIN:
      /* the two measures share the same merge loop (sum of element-wise
         minimums) and differ only in the final normalization */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      /* BUGFIX: same && -> || fix as above, so stat1/stat2 cover all entries */
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_MIN)
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      else  /* GK_CSR_AMIN: asymmetric, normalized by vector 1's sum only */
        sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}
/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query using cosine
similarity.
\param mat the matrix itself
\param nqterms is the number of columns in the query
\param qind is the list of query columns
    \param qval is the list of corresponding query weights
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN. In case of GK_CSR_COS,
the rows and the query are assumed to be of unit length.
\param nsim is the maximum number of requested most similar rows.
If -1 is provided, then everything is returned unsorted.
\param minsim is the minimum similarity of the requested most
similar rows
\param hits is the result set. This array should be at least
of length nsim.
\param i_marker is an array of size equal to the number of rows
whose values are initialized to -1. If NULL is provided
then this array is allocated and freed internally.
\param i_cand is an array of size equal to the number of rows.
If NULL is provided then this array is allocated and freed
internally.
\returns The number of identified most similar rows, which can be
smaller than the requested number of nnbrs in those cases
in which there are no sufficiently many neighbors.
*/
/**************************************************************************/
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind,
        float *qval, int simtype, int nsim, float minsim, gk_fkv_t *hits,
        int *i_marker, gk_fkv_t *i_cand)
{
  ssize_t i, ii, j, k;
  int nrows, ncols, ncand;
  ssize_t *colptr;
  int *colind, *marker;
  float *colval, *rnorms, mynorm, *rsums, mysum;
  gk_fkv_t *cand;

  /* an empty query matches nothing */
  if (nqterms == 0)
    return 0;

  nrows = mat->nrows;
  ncols = mat->ncols;

  /* the column-based (inverted-index) view must already exist */
  GKASSERT((colptr = mat->colptr) != NULL);
  GKASSERT((colind = mat->colind) != NULL);
  GKASSERT((colval = mat->colval) != NULL);

  /* marker[row] is the row's slot in cand[] (-1 if not yet a candidate);
     caller-supplied scratch arrays are used when provided */
  marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker"));
  cand   = (i_cand   ? i_cand   : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));

  /* For each query term, walk that term's column and accumulate a partial
     score into each touched row's candidate entry. */
  switch (simtype) {
    case GK_CSR_COS:
      /* dot product; rows and query are assumed unit-length (see header) */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }
      break;

    case GK_CSR_JAC:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }

      /* extended Jaccard: dot / (|row|^2 + |query|^2 - dot);
         requires precomputed row squared norms (gk_csr_ComputeSquaredNorms) */
      GKASSERT((rnorms = mat->rnorms) != NULL);
      mynorm = gk_fdot(nqterms, qval, 1, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
      break;

    case GK_CSR_MIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      /* min-based overlap normalized by (|row|_1 + |query|_1 - overlap);
         requires precomputed row sums (gk_csr_ComputeSums) */
      GKASSERT((rsums = mat->rsums) != NULL);
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
      break;

    /* Asymmetric MIN similarity */
    case GK_CSR_AMIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      /* normalized by the query's 1-norm only */
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/mysum;
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  /* go and prune the hits that are below minsim; this pass also resets
     marker[] so the scratch arrays can be reused by the caller */
  for (j=0, i=0; i<ncand; i++) {
    marker[cand[i].val] = -1;
    if (cand[i].key >= minsim)
      cand[j++] = cand[i];
  }
  ncand = j;

  if (nsim == -1 || nsim >= ncand) {
    nsim = ncand;  /* return everything, unsorted */
  }
  else {
    /* select the top-nsim candidates, then sort just those in decreasing order */
    nsim = gk_min(nsim, ncand);
    gk_dfkvkselect(ncand, nsim, cand);
    gk_fkvsortd(nsim, cand);
  }

  gk_fkvcopy(nsim, cand, hits);

  /* free only the scratch arrays we allocated ourselves */
  if (i_marker == NULL)
    gk_free((void **)&marker, LTERM);
  if (i_cand == NULL)
    gk_free((void **)&cand, LTERM);

  return nsim;
}
/*************************************************************************/
/*! This function finds the connected components in a graph.
\param mat is the graph structure in CSR format
\param cptr is the ptr structure of the CSR representation of the
components. The length of this vector must be mat->nrows+1.
\param cind is the indices structure of the CSR representation of
the components. The length of this vector must be mat->nrows.
\param cids is an array that stores the component # of each vertex
of the graph. The length of this vector must be mat->nrows.
\returns the number of components that it found.
\note The cptr, cind, and cids parameters can be NULL, in which case
only the number of connected components is returned.
*/
/*************************************************************************/
int gk_csr_FindConnectedComponents(gk_csr_t *mat, int32_t *cptr, int32_t *cind,
        int32_t *cids)
{
  // ssize_t jj, ii;
  ssize_t i, j, nvtxs, k, first, last, ntodo, ncmps;
  ssize_t *xadj;
  int32_t *adjncy, *pos, *todo;
  int32_t mustfree_ccsr=0;
  //int32_t mustfree_where=0;

  /* the adjacency structure must be square to be a graph */
  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_FindComponents: The matrix needs to be square.\n");
    return -1;
  }

  nvtxs  = mat->nrows;
  xadj   = mat->rowptr;
  adjncy = mat->rowind;

  /* Deal with NULL supplied cptr/cind vectors.
     NOTE(review): only cptr is tested; this assumes cptr and cind are either
     both NULL or both non-NULL -- confirm with callers. */
  if (cptr == NULL) {
    cptr = gk_i32malloc(nvtxs+1, "gk_csr_FindComponents: cptr");
    cind = gk_i32malloc(nvtxs, "gk_csr_FindComponents: cind");
    mustfree_ccsr = 1;
  }

  /* The list of vertices that have not been touched yet.
     The valid entries are from [0..ntodo). */
  todo = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_csr_FindComponents: todo"));

  /* For a vertex that has not been visited, pos[i] is the position in the
     todo list that this vertex is stored.
     If a vertex has been visited, pos[i] = -1. */
  pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_csr_FindComponents: pos"));

  /* Find the connected components via BFS, writing visit order into cind[] */
  ncmps = -1;          /* pre-incremented before each component is opened */
  ntodo = nvtxs;       /* All vertices have not been visited */
  first = last = 0;    /* Point to the first and last vertices that have been touched
                          but not explored.
                          These vertices are stored in cind[first]...cind[last-1]. */
  while (first < last || ntodo > 0) {
    if (first == last) { /* Find another starting vertex */
      cptr[++ncmps] = first;  /* Mark the end of the current CC */
      /* put the first vertex in the todo list as the start of the new CC */
      ASSERT(pos[todo[0]] != -1);
      cind[last++] = todo[0];

      pos[todo[0]] = -1;
      todo[0] = todo[--ntodo];
      pos[todo[0]] = 0;
    }

    i = cind[first++];  /* Get the first visited but unexplored vertex */
    for (j=xadj[i]; j<xadj[i+1]; j++) {
      k = adjncy[j];
      if (pos[k] != -1) {  /* neighbor not yet visited */
        cind[last++] = k;

        /* Remove k from the todo list and put the last item in the todo
           list at the position that k was so that the todo list will be
           consecutive. The pos[] array is updated accordingly to keep track
           the location of the vertices in the todo[] list. */
        todo[pos[k]] = todo[--ntodo];
        pos[todo[pos[k]]] = pos[k];
        pos[k] = -1;
      }
    }
  }
  cptr[++ncmps] = first;  /* close the last component */

  /* see if we need to return cids: cids[v] = component number of vertex v */
  if (cids != NULL) {
    for (i=0; i<ncmps; i++) {
      for (j=cptr[i]; j<cptr[i+1]; j++)
        cids[cind[j]] = i;
    }
  }

  if (mustfree_ccsr)
    gk_free((void **)&cptr, &cind, LTERM);

  gk_free((void **)&pos, &todo, LTERM);

  return (int) ncmps;
}
/*************************************************************************/
/*! Returns a symmetric version of a square matrix. The symmetric version
is constructed by applying an A op A^T operation, where op is one of
GK_CSR_SYM_SUM, GK_CSR_SYM_MIN, GK_CSR_SYM_MAX, GK_CSR_SYM_AVG.
\param mat the matrix to be symmetrized,
\param op indicates the operation to be performed. The possible values are
GK_CSR_SYM_SUM, GK_CSR_SYM_MIN, GK_CSR_SYM_MAX, and GK_CSR_SYM_AVG.
\returns the symmetrized matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_MakeSymmetric(gk_csr_t *mat, int op)
{
  // ssize_t k;
  ssize_t i, j, nnz;
  int nrows, nadj, hasvals;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind, *marker, *ids;
  float *rowval=NULL, *colval=NULL, *nrowval=NULL, *wgts=NULL;
  gk_csr_t *nmat;

  /* A op A^T only makes sense for a square matrix */
  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_MakeSymmetric: The matrix needs to be square.\n");
    return NULL;
  }

  hasvals = (mat->rowval != NULL);

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  if (hasvals)
    rowval = mat->rowval;

  /* create the column view for efficient processing:
     count column lengths, prefix-sum (MAKECSR), scatter, then shift back */
  colptr = gk_zsmalloc(nrows+1, 0, "colptr");
  colind = gk_i32malloc(rowptr[nrows], "colind");
  if (hasvals)
    colval = gk_fmalloc(rowptr[nrows], "colval");

  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      colptr[rowind[j]]++;
  }
  MAKECSR(i, nrows, colptr);

  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      colind[colptr[rowind[j]]] = i;
      if (hasvals)
        colval[colptr[rowind[j]]] = rowval[j];
      colptr[rowind[j]]++;
    }
  }
  SHIFTCSR(i, nrows, colptr);

  /* the symmetrized matrix has at most 2*nnz(A) entries per construction */
  nmat = gk_csr_Create();

  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_MakeSymmetric: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(2*rowptr[nrows], "gk_csr_MakeSymmetric: nrowind");
  if (hasvals)
    nrowval = nmat->rowval = gk_fmalloc(2*rowptr[nrows], "gk_csr_MakeSymmetric: nrowval");

  /* per-row scratch: ids[]/wgts[] collect row i's merged adjacency;
     marker[v] is v's slot in ids[] for this row (-1 if absent) */
  marker = gk_ismalloc(nrows, -1, "marker");
  ids    = gk_imalloc(nrows, "ids");
  if (hasvals)
    wgts = gk_fmalloc(nrows, "wgts");

  nrowptr[0] = nnz = 0;
  for (i=0; i<nrows; i++) {
    nadj = 0;
    /* out-edges: seed the merged list (AVG pre-halves the weight) */
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      ids[nadj] = rowind[j];
      if (hasvals)
        wgts[nadj] = (op == GK_CSR_SYM_AVG ? 0.5*rowval[j] : rowval[j]);
      marker[rowind[j]] = nadj++;
    }

    /* in-edges: merge with out-edges; MIN keeps only the intersection, so
       unmatched in-edges are added for every op except MIN */
    for (j=colptr[i]; j<colptr[i+1]; j++) {
      if (marker[colind[j]] == -1) {
        if (op != GK_CSR_SYM_MIN) {
          ids[nadj] = colind[j];
          if (hasvals)
            wgts[nadj] = (op == GK_CSR_SYM_AVG ? 0.5*colval[j] : colval[j]);
          nadj++;
        }
      }
      else {
        /* matched pair: combine the two weights according to op */
        if (hasvals) {
          switch (op) {
            case GK_CSR_SYM_MAX:
              wgts[marker[colind[j]]] = gk_max(colval[j], wgts[marker[colind[j]]]);
              break;
            case GK_CSR_SYM_MIN:
              wgts[marker[colind[j]]] = gk_min(colval[j], wgts[marker[colind[j]]]);
              break;
            case GK_CSR_SYM_SUM:
              wgts[marker[colind[j]]] += colval[j];
              break;
            case GK_CSR_SYM_AVG:
              wgts[marker[colind[j]]] = 0.5*(wgts[marker[colind[j]]] + colval[j]);
              break;
            default:
              errexit("Unsupported op for MakeSymmetric!\n");
          }
        }
        marker[colind[j]] = -1;  /* consumed */
      }
    }

    /* go over out edges again to resolve any edges that were not found in the in
     * edges: for MIN they are dropped (marked -1); marker[] is also reset here */
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      if (marker[rowind[j]] != -1) {
        if (op == GK_CSR_SYM_MIN)
          ids[marker[rowind[j]]] = -1;
        marker[rowind[j]] = -1;
      }
    }

    /* put the non '-1' entries in ids[] into i's row */
    for (j=0; j<nadj; j++) {
      if (ids[j] != -1) {
        nrowind[nnz] = ids[j];
        if (hasvals)
          nrowval[nnz] = wgts[j];
        nnz++;
      }
    }
    nrowptr[i+1] = nnz;
  }

  gk_free((void **)&colptr, &colind, &colval, &marker, &ids, &wgts, LTERM);

  return nmat;
}
|
GB_binop__lt_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__lt_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__lt_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int8)
// A*D function (colscale): GB (_AxD__lt_int8)
// D*A function (rowscale): GB (_DxB__lt_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int8)
// C=scalar+B GB (_bind1st__lt_int8)
// C=scalar+B' GB (_bind1st_tran__lt_int8)
// C=A+scalar GB (_bind2nd__lt_int8)
// C=A'+scalar GB (_bind2nd_tran__lt_int8)
// C type: bool
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij < bij)
// Operator/type configuration macros consumed by the included templates
// below: they specialize the generic kernels for cij = (aij < bij) with
// int8_t inputs and a bool result.
#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LT || GxB_NO_INT8 || GxB_NO_LT_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LT is not in that list, so this dense-accum kernel is compiled out
// (hence the "(none)" placeholder name from the generator).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the work is done by the shared template, specialized for LT_INT8
    // via the GB_* macros defined above
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the subassign template is compiled out (#if 0) for this operator,
    // so the function is a no-op that reports success
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lt_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the scalar-accum template is compiled out (#if 0) for this operator,
    // so the function is a no-op that reports success
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the LT result type (bool); the template fills Cx in place
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the LT result type (bool); the template fills Cx in place
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B; released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // method 08: C is sparse/hypersparse; all work is in the meta template
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for LT (it has a flipped variant, GT), so only the
    // #else arm below is compiled for this operator
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // method 04: M sparse/hyper, A and B bitmap/full; work is in the template
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lt_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // bitmap variant of eWiseMult; all work is in the shared template
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lt_int8)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    bool *Cx = (bool *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    // Cx [p] = (x < Bx [p]) for every entry present in B
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (GBB (Bb, p))
        {
            int8_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x < bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lt_int8)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    // Cx [p] = (Ax [p] < y) for every entry present in A
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            int8_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij < y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included transpose template: it reads A at
// position pA and writes the result of (x < aij) into C at position pC.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply the LT operator with x bound first.
GrB_Info GB (_bind1st_tran__lt_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,          // per-task transpose workspaces
const int64_t *restrict A_slice,        // how A is sliced across tasks
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
// this kernel was disabled at compile time
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (generated-code convention)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included transpose template: it reads A at
// position pA and writes the result of (aij < y) into C at position pC.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply the LT operator with y bound second.
GrB_Info GB (_bind2nd_tran__lt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,          // per-task transpose workspaces
const int64_t *restrict A_slice,        // how A is sliced across tasks
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this kernel was disabled at compile time
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
/// Zero-initialized so that a default-constructed record (such as one
/// created on first access through FileNullabilityMap's operator[], which
/// uses DenseMap's default construction) never holds an indeterminate
/// value.
uint8_t PointerKind = 0;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// Records, per file ID, whether any nullability information has been seen
/// in that file.
class FileNullabilityMap {
/// The nullability record for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A one-entry cache keyed on the file ID of the most recent lookup.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Fast path: the requested file is the one currently cached.
if (Cache.File == file)
return Cache.Nullability;
// Cache miss: first write the cached record (if there is one) back
// into the map...
if (!Cache.File.isInvalid())
Map[Cache.File] = Cache.Nullability;
// ...then load the requested file's record into the cache slot,
// default-constructing it on first access.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Tracks the type expected at the token currently being parsed. The
/// expected type is tied to a particular token; every function that records
/// or consumes it takes the start location of that token, which lets the
/// parser avoid refreshing the type on hot paths.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref; clients should make sure all calls to get() with the same
/// location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
/// Returns the expected type for the token starting at \p Tok, or a null
/// QualType when no expectation was recorded for that location.
QualType get(SourceLocation Tok) const {
if (Tok == ExpectedLoc) {
// Prefer an eagerly-recorded type; otherwise fall back to the lazy
// computation, if one was registered.
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
}
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Decide whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
// A visible previous declaration is always linked to.
if (isVisible(Old))
return true;
// Otherwise, link only if the new declaration is externally declarable.
// See comment in the other overload for why it's safe to compute the
// linkage of the new declaration here.
if (!New->isExternallyDeclarable())
return false;
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
/// Identifies which section a "#pragma clang section" directive targets.
enum PragmaClangSectionKind {
PCSK_Invalid = 0,   // no / unrecognized section kind
PCSK_BSS = 1,       // #pragma clang section bss="..."
PCSK_Data = 2,      // #pragma clang section data="..."
PCSK_Rodata = 3,    // #pragma clang section rodata="..."
PCSK_Text = 4,      // #pragma clang section text="..."
PCSK_Relro = 5      // #pragma clang section relro="..."
};
/// Whether a "#pragma clang section" directive sets a section name or
/// clears the current one.
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
/// State for one "#pragma clang section" section kind (bss, data, rodata,
/// text, or relro).
struct PragmaClangSection {
std::string SectionName;        // section name currently in effect
bool Valid = false;             // true while a section name is in effect
SourceLocation PragmaLocation;  // location of the most recent pragma
// Handle a "#pragma clang section" directive for this kind: set the
// section to Name, or clear it, recording the pragma's location.
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
/// Bit-flag actions understood by MSVC-style stack-based pragmas
/// (e.g. #pragma pack, #pragma vtordisp, the segment pragmas).  Push/pop
/// can be combined with Set, hence the composite values below.
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
/// A stack of values controlled by an MSVC-style push/pop pragma
/// (see PragmaMsStackAction).  Each pushed slot records the value, an
/// optional label, and the source locations needed for diagnostics.
template<typename ValueType>
struct PragmaStack {
/// One saved entry on the pragma stack.
struct Slot {
llvm::StringRef StackSlotLabel;     // optional label given in push(...)
ValueType Value;                    // value saved by the push
SourceLocation PragmaLocation;      // location of the pragma itself
SourceLocation PragmaPushLocation;  // location of the 'push' token
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
// Perform a set/push/pop/reset action on this stack; defined out of line.
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
// True when the current value differs from the default.
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;       // pushed-and-not-yet-popped slots
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;           // value currently in effect
SourceLocation CurrentPragmaLocation; // location of the controlling pragma
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;                 // pack value in effect at the #include
SourceLocation CurrentPragmaLocation;  // location of the controlling pragma
// Whether the value differed from the default at the #include, and
// whether a diagnostic should be issued for it.
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
// Pushes a sentinel labeled SlotLabel on construction when ShouldAct is
// true; the destructor pops it again.  When ShouldAct is false both are
// no-ops.
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;              // the Sema whose pragma stacks are guarded
StringRef SlotLabel;  // label identifying the sentinel slot
bool ShouldAct;       // whether construction/destruction do anything
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
/// Opaque saved state for DelayedDiagnostics: holds the pool that was
/// current before a push so the matching pop can restore it.  Only
/// DelayedDiagnostics itself can read or write the saved pool.
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;  // pool to restore on pop
friend class Sema::DelayedDiagnostics;
};
// Named aliases for the contexts in which this saved state is used.
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// Encapsulates the logic for delaying diagnostics during parsing and
/// other processing.  Diagnostics are delayed whenever a pool is current;
/// push/pop install and remove pools in a stack-like fashion via the
/// opaque DelayedDiagnosticsState tokens.
class DelayedDiagnostics {
/// The pool that currently receives delayed diagnostics, or null when
/// diagnostics are not being delayed.
sema::DelayedDiagnosticPool *CurPool = nullptr;
public:
DelayedDiagnostics() = default;
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; }
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in \p pool until the matching pop.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState Saved;
Saved.SavedPool = CurPool;
CurPool = &pool;
return Saved;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState Saved;
Saved.SavedPool = CurPool;
CurPool = nullptr;
return Saved;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
/// On construction it saves Sema's current context, delayed-diagnostics
/// state, and CXXThisTypeOverride, then installs the given context; the
/// destructor (or an explicit pop()) restores everything.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;                 // context to restore; null once popped
ProcessingContextState SavedContextState;  // saved delayed-diagnostics state
QualType SavedCXXThisTypeOverride;         // saved 'this' type override
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
// By default the 'this' type override is cleared for the new context.
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
// Restore the saved state early; safe to call more than once.
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;  // mark as popped so the destructor is a no-op
}
~ContextRAII() {
pop();
}
};
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
/// True if the current expression context is constant-evaluated, either
/// per the innermost evaluation context or via the override flag above.
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body: pushes the given declaration context, a function
/// scope, and a potentially-evaluated expression context on construction,
/// and undoes all of it (in reverse order) in the destructor.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;       // restores CurContext on destruction
bool PushedCodeSynthesisContext = false;  // set by addContextNote()
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
// Mark the function as about to receive a body; cleared again in the
// destructor.  DC must be a FunctionDecl or an ObjCMethodDecl.
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
// Push a DefiningSynthesizedFunction code-synthesis context so that
// diagnostics emitted while synthesizing the body get a context note.
// May be called at most once per scope.
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
///
/// NOTE(review): ExpressionEvaluationContextRecord::isUnevaluated() and
/// isConstantEvaluated() test specific enumerators below; keep them in sync
/// when adding new contexts.
enum class ExpressionEvaluationContext {
  /// The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};
/// Data structure used to record current or nested
/// expression evaluation contexts.
///
/// One record is pushed onto Sema::ExprEvalContexts for each nested
/// context; the record captures the state that must be saved and restored
/// when the context is popped.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  /// NOTE(review): not initialized by the constructor below; presumably set
  /// by the code that pushes the context — confirm at the push sites.
  bool IsDecltype;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  // Expressions whose ODR-use status is deferred until the context is popped.
  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  // Expressions that dereference a 'noderef'-marked pointer; diagnosed when
  // the context is popped unless their address was taken.
  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// Expressions appearing as the LHS of a volatile assignment in this
  /// context. We produce a warning for these when popping the context if
  /// they are not discarded-value expressions nor unevaluated operands.
  SmallVector<Expr*, 2> VolatileAssignmentLHSs;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}

  /// True for all three unevaluated-operand contexts (plain, list, abstract).
  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  /// True only for the compile-time-evaluated context.
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  /// Outcome of overload resolution for the special member.
  enum Kind {
    /// No viable member, or the selected member is deleted.
    NoMemberOrDeleted,
    /// Overload resolution was ambiguous.
    Ambiguous,
    /// A single, usable member was selected.
    Success
  };

private:
  // Method pointer plus a 2-bit Kind packed into the pointer's low bits.
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  // Classifies a deleted method as NoMemberOrDeleted, otherwise Success.
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};
/// A SpecialMemberOverloadResult that can live in a FoldingSet, keyed by a
/// precomputed FoldingSetNodeID (used by Sema::SpecialMemberCache below).
class SpecialMemberOverloadResultEntry
    : public llvm::FastFoldingSetNode,
      public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID)
  {}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
///
/// NOTE: packed into 3 bits by the SpecialMemberDecl PointerIntPair declared
/// just below, so there is room for at most 8 enumerators.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  /// Sentinel: not a special member.
  CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions (C++20 [class.compare]).
enum class DefaultedComparisonKind : unsigned char {
  /// This is not a defaultable comparison operator.
  None,
  /// This is an operator== that should be implemented as a series of
  /// subobject comparisons.
  Equal,
  /// This is an operator<=> that should be implemented as a series of
  /// subobject comparisons.
  ThreeWay,
  /// This is an operator!= that should be implemented as a rewrite in terms
  /// of a == comparison.
  NotEqual,
  /// This is an <, <=, >, or >= that should be implemented as a rewrite in
  /// terms of a <=> comparison.
  Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
///
/// RAII guard: snapshots Sema::FPFeatures on construction and restores it on
/// destruction. NOTE(review): despite the FP_CONTRACT name, it saves and
/// restores the whole FPOptions value.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

private:
  Sema& S;
  FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

  // Emission protocol: flush + clear the base builder, then hand the
  // diagnostic to Sema so it can attach the instantiation stack. The order
  // of these three steps is load-bearing.
  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  /// Streams through the DiagnosticBuilder base, then returns the derived
  /// reference so chained '<<' stays a SemaDiagnosticBuilder.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};
/// Emit a diagnostic.
///
/// Starts the diagnostic on the underlying DiagnosticsEngine and wraps it in
/// a SemaDiagnosticBuilder, whose destructor routes emission through Sema
/// (attaching template-instantiation notes when applicable).
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  // The builder must be a named lvalue: SemaDiagnosticBuilder's constructor
  // takes it by non-const reference.
  DiagnosticBuilder BaseBuilder = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(BaseBuilder, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
/// The kind of translation-unit fragment being processed at end-of-fragment
/// (see ActOnEndOfTranslationUnitFragment below).
enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,
  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
  Normal,
  /// The private module fragment, between 'module :private;' and the end of
  /// the translation unit.
  Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
///
/// Used as the deleter of PoppedFunctionScopePtr below; the actual disposal
/// logic lives in operator(), defined out of line.
class PoppedFunctionScopeDeleter {
  Sema *Self;

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
  void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
/// Retrieve the innermost function scope, or null when no function scope is
/// active (e.g. at file scope).
sema::FunctionScopeInfo *getCurFunction() const {
  if (FunctionScopes.empty())
    return nullptr;
  return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
///
/// Subclasses (e.g. BoundTypeDiagnoser below) package a diagnostic and its
/// arguments; RequireCompleteType and friends call diagnose() when the type
/// turns out to be incomplete.
struct TypeDiagnoser {
  // Defaulted instead of user-provided empty bodies (matches the file's use
  // of '= default' elsewhere, e.g. SemaDiagnosticBuilder's copy ctor).
  TypeDiagnoser() = default;

  /// Emit the diagnostic for the incomplete type \p T at \p Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;

  // Virtual destructor: instances are destroyed through base pointers.
  virtual ~TypeDiagnoser() = default;
};
// getPrintable - overload set that normalizes values into types streamable
// into a SemaDiagnosticBuilder (used by BoundTypeDiagnoser::emit below).
// Most overloads are identity; the exceptions are noted inline.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// Widens a single location to a SourceRange via implicit conversion.
static SourceRange getPrintable(SourceLocation L) { return L; }
// Converts an expression / type-location to its source range.
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// A TypeDiagnoser that captures a diagnostic ID plus its arguments (by
/// reference) at the call site, and replays them when diagnose() fires.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;
  // References to the caller's arguments; the caller must keep them alive
  // for the diagnoser's lifetime.
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order. The braced-init-list
    // guarantees left-to-right evaluation, so arguments stream in the order
    // they were bound; the leading 'false' keeps the array non-empty for the
    // zero-argument case.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  /// Emit DiagID at \p Loc with the bound arguments, then append the
  /// offending type \p T as the final diagnostic argument.
  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T;
  }
};
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
/// State for one entry on the ModuleScopes stack below — one record per
/// module whose scope we are currently inside.
struct ModuleScope {
  // Where this module scope began.
  SourceLocation BeginLoc;
  // The module itself; null until known.
  clang::Module *Module = nullptr;
  // NOTE(review): presumably true for module-interface units — confirm at
  // the sites that push ModuleScopes.
  bool ModuleInterface = false;
  // Whether the enclosing global module fragment was created implicitly.
  bool ImplicitGlobalModuleFragment = false;
  // Visibility state saved from the enclosing scope, to restore on pop.
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
///
/// \returns the innermost module on the ModuleScopes stack, or null when we
/// are not inside any module scope.
Module *getCurrentModule() const {
  if (ModuleScopes.empty())
    return nullptr;
  return ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path: a declaration that isn't hidden is always visible. Only
  // hidden declarations need the slow module-visibility check.
  if (!D->isHidden())
    return true;
  return isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
///
/// \param Modules if non-null, may receive the modules that would need to be
/// imported to make a declaration visible (filled by the slow path).
bool
hasVisibleDeclaration(const NamedDecl *D,
                      llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  // Cheap check on this declaration first; fall back to scanning the
  // entity's redeclarations only when it fails.
  if (isVisible(D))
    return true;
  return hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
};
  /// The result of classifying a name (see \c ClassifyName): the kind of
  /// entity the name denotes, together with the payload for that kind.
  class NameClassification {
    NameClassificationKind Kind;
    // Payload, discriminated by Kind. Only the member corresponding to Kind
    // is active; the constructors and factory functions below establish and
    // maintain that invariant.
    union {
      ExprResult Expr;
      NamedDecl *NonTypeDecl;
      TemplateName Template;
      ParsedType Type;
    };
    // Private payload-free construction; used by the factories below.
    explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
  public:
    NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
    // The keyword itself is not stored; only the classification survives.
    NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
    static NameClassification Error() {
      return NameClassification(NC_Error);
    }
    static NameClassification Unknown() {
      return NameClassification(NC_Unknown);
    }
    static NameClassification ContextIndependentExpr(ExprResult E) {
      NameClassification Result(NC_ContextIndependentExpr);
      Result.Expr = E;
      return Result;
    }
    static NameClassification NonType(NamedDecl *D) {
      NameClassification Result(NC_NonType);
      Result.NonTypeDecl = D;
      return Result;
    }
    static NameClassification UndeclaredNonType() {
      return NameClassification(NC_UndeclaredNonType);
    }
    static NameClassification DependentNonType() {
      return NameClassification(NC_DependentNonType);
    }
    static NameClassification TypeTemplate(TemplateName Name) {
      NameClassification Result(NC_TypeTemplate);
      Result.Template = Name;
      return Result;
    }
    static NameClassification VarTemplate(TemplateName Name) {
      NameClassification Result(NC_VarTemplate);
      Result.Template = Name;
      return Result;
    }
    static NameClassification FunctionTemplate(TemplateName Name) {
      NameClassification Result(NC_FunctionTemplate);
      Result.Template = Name;
      return Result;
    }
    static NameClassification UndeclaredTemplate(TemplateName Name) {
      NameClassification Result(NC_UndeclaredTemplate);
      Result.Template = Name;
      return Result;
    }
    NameClassificationKind getKind() const { return Kind; }
    // Each accessor asserts that the requested payload matches Kind before
    // reading the corresponding union member.
    ExprResult getExpression() const {
      assert(Kind == NC_ContextIndependentExpr);
      return Expr;
    }
    ParsedType getType() const {
      assert(Kind == NC_Type);
      return Type;
    }
    NamedDecl *getNonTypeDecl() const {
      assert(Kind == NC_NonType);
      return NonTypeDecl;
    }
    TemplateName getTemplateName() const {
      assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
             Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
      return Template;
    }
    // Maps the template classifications to the parser-facing
    // TemplateNameKind; only valid for the four template kinds.
    TemplateNameKind getTemplateNameKind() const {
      switch (Kind) {
      case NC_TypeTemplate:
        return TNK_Type_template;
      case NC_FunctionTemplate:
        return TNK_Function_template;
      case NC_VarTemplate:
        return TNK_Var_template;
      case NC_UndeclaredTemplate:
        return TNK_Undeclared_template;
      default:
        llvm_unreachable("unsupported name classification.");
      }
    }
  };
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
  bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
    // Only C++ has templates, and an invalid expression tells us nothing.
    if (!getLangOpts().CPlusPlus || E.isInvalid())
      return false;
    Dependent = false;
    // A plausible template-name is a declaration reference or member access
    // that does not already carry explicit template arguments. The first
    // two forms are non-dependent; check them before flipping Dependent.
    if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
      return !DRE->hasExplicitTemplateArgs();
    if (auto *ME = dyn_cast<MemberExpr>(E.get()))
      return !ME->hasExplicitTemplateArgs();
    // The remaining recognized forms are dependent, so report that even if
    // neither matches below.
    Dependent = true;
    if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
      return !DSDRE->hasExplicitTemplateArgs();
    if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
      return !DSME->hasExplicitTemplateArgs();
    // Any additional cases recognized here should also be handled by
    // diagnoseExprIntendedAsTemplateName.
    return false;
  }
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current lanugage mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parsed has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
  class DefaultedFunctionKind {
    /// The special-member kind, or CXXInvalid when this is a comparison (or
    /// nothing). 8-bit bitfield to keep the class compact.
    CXXSpecialMember SpecialMember : 8;
    /// The comparison kind, or DefaultedComparisonKind::None when this is a
    /// special member (or nothing).
    DefaultedComparisonKind Comparison : 8;
  public:
    /// Default state: neither a special member nor a comparison, so
    /// operator bool() yields false.
    DefaultedFunctionKind()
        : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
    }
    DefaultedFunctionKind(CXXSpecialMember CSM)
        : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
    DefaultedFunctionKind(DefaultedComparisonKind Comp)
        : SpecialMember(CXXInvalid), Comparison(Comp) {}
    bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
    bool isComparison() const {
      return Comparison != DefaultedComparisonKind::None;
    }
    /// True if this represents either kind of defaulted function.
    explicit operator bool() const {
      return isSpecialMember() || isComparison();
    }
    // These return the stored field unconditionally ("none" values when the
    // other alternative is set); callers check the kind first.
    CXXSpecialMember asSpecialMember() const { return SpecialMember; }
    DefaultedComparisonKind asComparison() const { return Comparison; }
    /// Get the index of this function kind for use in diagnostics.
    unsigned getDiagnosticIndex() const {
      // At most one of the two fields carries information while the other
      // sits at its zero-cost "none" value, so their sum forms one flat
      // index: special members first, comparisons offset past CXXInvalid.
      static_assert(CXXInvalid > CXXDestructor,
                    "invalid should have highest index");
      static_assert((unsigned)DefaultedComparisonKind::None == 0,
                    "none should be equal to zero");
      return SpecialMember + (unsigned)Comparison;
    }
  };
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                              SkipBodyInfo &SkipBody);

/// Opaque token returned by ActOnTagStartSkippedDefinition and consumed by
/// ActOnTagFinishSkippedDefinition.
typedef void *SkippedDefinitionContext;

/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

/// Invoked when we start the definition of an Objective-C container
/// (interface, implementation, protocol, or category).
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     bool IsFinalSpelledSealed,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                              SourceRange BraceRange);

/// Counterpart of ActOnTagStartSkippedDefinition for a skipped body.
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

/// Invoked once the definition of an Objective-C container is complete.
void ActOnObjCContainerFinishDefinition();

/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
/// Semantic checking for a single enumerator; \p LastEnumConst is the
/// previously processed enumerator (used e.g. for implicit value
/// computation), or null for the first one.
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                    EnumConstantDecl *LastEnumConst,
                                    SourceLocation IdLoc,
                                    IdentifierInfo *Id,
                                    Expr *val);

/// Check the validity of an explicit enum underlying type.
/// Returns true on error.
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);

/// Check that a redeclaration of an enum is compatible with a previous
/// declaration \p Prev (scoped-ness, underlying type, fixed-ness).
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                            QualType EnumUnderlyingTy, bool IsFixed,
                            const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                    SourceLocation IILoc);

/// Invoked for each parsed enumerator of an enum definition.
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                        SourceLocation IdLoc, IdentifierInfo *Id,
                        const ParsedAttributesView &Attrs,
                        SourceLocation EqualLoc, Expr *Val);

/// Invoked when the closing brace of an enum definition has been parsed,
/// with all of its enumerators in \p Elements.
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                   Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                   const ParsedAttributesView &Attr);
/// Return the declaration context that lexically contains \p DC for the
/// purpose of name lookup/semantic analysis.
DeclContext *getContainingDC(DeclContext *DC);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
/// Restore the previously pushed declaration context.
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
/// Counterpart of EnterDeclaratorContext.
void ExitDeclaratorContext(Scope *S);

/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
/// Counterpart of ActOnReenterFunctionContext.
void ActOnExitFunctionContext();

/// Return the innermost function-like declaration context (function,
/// method, or block) enclosing the current context.
DeclContext *getFunctionLevelDeclContext();

/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();

/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();

/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();

/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
/// Determine whether a new typedef \p New conflicts with an existing
/// type declaration \p Old. Returns true on conflict.
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,
  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,
  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,
  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,
  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,
  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return true if a new attribute was added.
//
// Each mergeXXXAttr below reconciles an attribute of the corresponding kind
// on declaration \p D with an attribute already present (e.g. from a prior
// declaration), returning the attribute to attach or null.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
                      IdentifierInfo *Platform, bool Implicit,
                      VersionTuple Introduced, VersionTuple Deprecated,
                      VersionTuple Obsoleted, bool IsUnavailable,
                      StringRef Message, bool IsStrict, StringRef Replacement,
                      AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                        TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                                    VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
                        StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
                                          const AttributeCommonInfo &CI,
                                          bool BestCase,
                                          MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
                            IdentifierInfo *Format, int FormatIdx,
                            int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
                              StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
                              StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
                                        const AttributeCommonInfo &CI,
                                        const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
                                    const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
                                  const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
                                        const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
                                  StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
                                              const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
/// Merge all attributes of the (possibly redeclared) declaration \p Old
/// into the new declaration \p New, using merge kind \p AMK for
/// availability attributes.
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                         AvailabilityMergeKind AMK = AMK_Redeclaration);

/// Merge a new typedef-name declaration with previous lookup results.
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                          LookupResult &OldDecls);

/// Merge a redeclared function with its previous declaration.
/// Returns true on error (e.g. incompatible redeclaration).
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                       bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                  Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);

/// Merge a redeclared variable with its previous declaration(s).
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);

/// Diagnose a redefinition of a variable; returns true if it is an error.
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
/// Emit a note pointing at the previous definition of \p Old.
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
/// C++-specific function redeclaration merging.
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
  AA_Assigning,
  AA_Passing,
  AA_Returning,
  AA_Converting,
  AA_Initializing,
  AA_Sending,
  AA_Casting,
  AA_Passing_CFAudited
};

/// C++ Overloading.
enum OverloadKind {
  /// This is a legitimate overload: the existing declarations are
  /// functions or function templates with different signatures.
  Ovl_Overload,
  /// This is not an overload because the signature exactly matches
  /// an existing declaration.
  Ovl_Match,
  /// This is not an overload because the lookup results contain a
  /// non-function.
  Ovl_NonFunction
};
/// Determine whether \p New is a valid overload of the previous
/// declarations in \p OldDecls; on a non-overload, \p OldDecl is set to
/// the conflicting declaration.
OverloadKind CheckOverload(Scope *S,
                           FunctionDecl *New,
                           const LookupResult &OldDecls,
                           NamedDecl *&OldDecl,
                           bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
                bool ConsiderCudaAttrs = true);

/// Attempt an implicit conversion of \p From to \p ToType, producing the
/// implicit conversion sequence (which may be a failed sequence).
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
                      bool SuppressUserConversions,
                      bool AllowExplicit,
                      bool InOverloadResolution,
                      bool CStyle,
                      bool AllowObjCWritebackConversion);

// Predicates classifying individual standard-conversion steps. The
// "IsXXXConversion" forms that take a QualType& out-parameter also compute
// the converted type on success.
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                         bool InOverloadResolution,
                         QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
                             QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                              QualType& ConvertedType);

/// Compare the parameter lists of two function prototypes; on mismatch,
/// \p ArgPos (if non-null) receives the index of the first differing
/// parameter.
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                const FunctionProtoType *NewType,
                                unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                QualType FromType, QualType ToType);

void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);

// "CheckXXXConversion" forms perform the conversion's semantic checks,
// computing the cast kind and (for class hierarchies) the base path.
bool CheckPointerConversion(Expr *From, QualType ToType,
                            CastKind &Kind,
                            CXXCastPath& BasePath,
                            bool IgnoreBaseAccess,
                            bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
                               bool InOverloadResolution,
                               QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                  CastKind &Kind,
                                  CXXCastPath &BasePath,
                                  bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
                               bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
                          QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

/// Perform move-or-copy initialization of \p Entity from \p Value,
/// considering \p NRVOCandidate for the named-return-value optimization.
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                           const VarDecl *NRVOCandidate,
                                           QualType ResultType,
                                           Expr *Value,
                                           bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
    const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                  ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                     SourceLocation EqualLoc,
                                     ExprResult Init,
                                     bool TopLevelOfInitList = false,
                                     bool AllowExplicit = false);
/// Initialize the implicit object argument of a member-function call.
ExprResult PerformObjectArgumentInitialization(Expr *From,
                                               NestedNameSpecifier *Qualifier,
                                               NamedDecl *FoundDecl,
                                               CXXMethodDecl *Method);

/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);

ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
  CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
  CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};

/// Check that \p From is a converted constant expression of type \p T in
/// context \p CCE, producing the evaluated value on success. The two
/// overloads differ only in the representation of the evaluated result.
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
///
/// Subclasses implement \c match to accept/reject destination types and
/// the diagnoseXXX/noteXXX hooks to emit context-specific diagnostics;
/// see PerformContextualImplicitConversion.
class ContextualImplicitConverter {
public:
  /// Suppress all diagnostics from this converter.
  bool Suppress;
  /// Suppress only the "conversion was applied" diagnostic.
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};
/// Contextual converter specialized for integral-or-enumeration
/// destinations ("ICE" conversions); the no-match diagnostic is routed
/// through diagnoseNotInt.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  /// Whether scoped enumeration types are also acceptable matches.
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations,
                      bool Suppress, bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  // Forward the generic "no match" diagnostic to the ICE-specific hook.
  SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

/// Kinds of Objective-C subscripting expressions.
enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

/// Convert \p From for use as the object argument when accessing member
/// \p Member (casting through the class hierarchy as needed).
ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
    NamedDecl *Found, FunctionDecl *Fn,
    OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
    QualType DestType = QualType(), bool TakingAddress = false);

// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                               bool TakingAddress = false);

/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                            bool MissingImplicitThis = false);

/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);

/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
                                         const Expr *ThisArg,
                                         ArrayRef<const Expr *> Args,
                                         SourceLocation Loc);

/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
                                           SourceLocation Loc);

/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
                                       bool Complain = false,
                                       SourceLocation Loc = SourceLocation());

// [PossiblyAFunctionType]  -->   [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

/// Resolve the overloaded function referenced by an address-of expression
/// against a target function type.
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
                                   QualType TargetType,
                                   bool Complain,
                                   DeclAccessPair &Found,
                                   bool *pHadMultipleCandidates = nullptr);

/// If the overload set referenced by \p E has exactly one viable
/// candidate, return it.
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
                                            DeclAccessPair &FoundResult);

/// As above, but also fixes up \p SrcExpr to refer to that candidate.
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
    ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

/// Resolve an overloaded expression that must name a single function
/// template specialization.
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                            bool Complain = false,
                                            DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
    ExprResult &SrcExpr,
    bool DoFunctionPointerConverion = false,
    bool Complain = false,
    SourceRange OpRangeForComplaining = SourceRange(),
    QualType DestTypeForComplaining = QualType(),
    unsigned DiagIDForComplaining = 0);

/// Rewrite an expression referring to an overload set so that it refers
/// to the chosen function \p Fn.
Expr *FixOverloadedFunctionReference(Expr *E,
                                     DeclAccessPair FoundDecl,
                                     FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
                                          DeclAccessPair FoundDecl,
                                          FunctionDecl *Fn);

/// Populate \p CandidateSet for an overloaded call through \p ULE.
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                 ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  FRS_Success,
  FRS_NoViableFunction,
  FRS_DiagnosticIssued
};

/// Build the begin()/end() calls for a range-based for loop over \p Range.
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                         SourceLocation RangeLoc,
                                         const DeclarationNameInfo &NameInfo,
                                         LookupResult &MemberLookup,
                                         OverloadCandidateSet *CandidateSet,
                                         Expr *Range, ExprResult *CallExpr);

/// Build a call expression whose callee is an overloaded function name,
/// performing overload resolution (with optional typo correction).
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                   UnresolvedLookupExpr *ULE,
                                   SourceLocation LParenLoc,
                                   MultiExprArg Args,
                                   SourceLocation RParenLoc,
                                   Expr *ExecConfig,
                                   bool AllowTypoCorrection=true,
                                   bool CalleesAddressIsTaken=false);

/// Build the candidate set for an overloaded call; returns true on error.
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                            MultiExprArg Args, SourceLocation RParenLoc,
                            OverloadCandidateSet *CandidateSet,
                            ExprResult *Result);

/// Build a unary operator expression that may resolve to an overloaded
/// operator function.
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                   UnaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns,
                                   Expr *input, bool RequiresADL = true);

/// Build a binary operator expression that may resolve to an overloaded
/// operator function (including C++20 rewritten comparison candidates).
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
                                 BinaryOperatorKind Opc,
                                 const UnresolvedSetImpl &Fns,
                                 Expr *LHS, Expr *RHS,
                                 bool RequiresADL = true,
                                 bool AllowRewrittenCandidates = true);

/// Build an array subscript expression that may resolve to operator[].
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                              SourceLocation RLoc,
                                              Expr *Base,Expr *Idx);

/// Build a call whose callee is a (possibly overloaded) member function.
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                          SourceLocation LParenLoc,
                          MultiExprArg Args,
                          SourceLocation RParenLoc);

/// Build a call to an object of class type (operator() or surrogate).
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
                             MultiExprArg Args,
                             SourceLocation RParenLoc);

/// Build a use of an overloaded operator-> on \p Base.
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                    SourceLocation OpLoc,
                                    bool *NoArrowOperatorFound = nullptr);

/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                         CallExpr *CE, FunctionDecl *FD);

/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                              bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
/// Return the innermost enclosing scope that can hold non-field
/// declarations.
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,
  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,
  /// Label name lookup.
  LookupLabel,
  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,
  /// Look up of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,
  /// Look up of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,
  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,
  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations.  This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,
  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,
  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,
  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,
  /// Look up implicit 'self' parameter of an objective-c method.
  LookupObjCImplicitSelfParam,
  /// Look up the name of an OpenMP user-defined reduction operation.
  LookupOMPReductionName,
  /// Look up the name of an OpenMP user-defined mapper.
  LookupOMPMapperName,
  /// Look up any declaration with any name.
  LookupAnyName
};

/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,
  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};
/// Pick the redeclaration-lookup kind appropriate for the current
/// declaration context.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  auto *ContextDecl = cast<Decl>(CurContext);
  bool HasOwningModule =
      ContextDecl->getOwningModuleForLinkage(/*IgnoreLinkage*/ true) != nullptr;
  return HasOwningModule ? ForVisibleRedeclaration : ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
///
/// \see LookupLiteralOperator()
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
/// Look up the special member function of kind \p SM for class \p D,
/// taking the given argument and 'this' qualifiers into account.
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
/// Callback type used to emit the diagnostic(s) for a delayed typo
/// correction.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
/// Callback type used to rebuild an expression from a TypoExpr once a
/// correction has been chosen.
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
/// Per-TypoExpr bookkeeping: the candidate consumer plus the diagnostic
/// and recovery callbacks registered for it. Move-only.
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
/// Retrieve the state recorded for \p TE in DelayedTypos.
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overload sets.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
/// Perform unqualified name lookup starting at scope \p S.
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
/// Perform qualified name lookup within the given declaration context.
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
// Lookups for C++ special member functions of \p Class.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
/// Look up a literal operator for the given argument types; the result
/// kind is a LiteralOperatorLookupResult.
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
/// Whether typo correction is being attempted during normal processing or
/// as part of error recovery.
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
/// Try to correct \p Typo immediately, returning the chosen correction
/// (which may be empty on failure).
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
/// Defer typo correction: build a TypoExpr whose correction (and
/// diagnostics, via \p TDG / \p TRC) are resolved later.
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload: correct delayed typos in \p E with an explicit
/// \p Filter and no VarDecl to avoid.
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(E, /*InitDecl=*/nullptr, Filter);
}
/// Process any TypoExprs in \p ER, forwarding \p InitDecl so that the
/// variable whose initializer is being corrected is not offered as a
/// correction candidate.
///
/// Returns \p ER unchanged if it is already invalid.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; }) {
  // Bug fix: InitDecl was previously accepted but silently dropped when
  // delegating (the two-argument overload passes nullptr), so
  // self-referencing initializers were not excluded. Forward it through.
  return ER.isInvalid()
             ? ER
             : CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
/// Convenience overload: correct delayed typos in \p ER with an explicit
/// \p Filter and no VarDecl to avoid.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(ER, /*InitDecl=*/nullptr, Filter);
}
/// Emit the diagnostic (and fix-it) for an applied typo correction.
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
/// Compute the associated namespaces/classes of \p Args for
/// argument-dependent lookup.
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
/// Check that attribute argument \p ArgNum is a string literal, extracting
/// its value into \p Str (and optionally its location).
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
/// A small set of selectors, used throughout the ObjC checking routines
/// below.
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// it property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// How strictly two method declarations' types are compared in
/// MatchTwoMethodDeclarations.
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (!RecordFailure)
    return TypoCorrection();
  // Remember the failing location so this typo is not retried there.
  TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  const bool IsInstanceMethod = true;
  AddMethodToGlobalPool(Method, impl, IsInstanceMethod);
}
/// AddFactoryMethodToGlobalPool - Add a factory (non-instance) method to
/// the global method pool.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  const bool IsInstanceMethod = false;
  AddMethodToGlobalPool(Method, impl, IsInstanceMethod);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global method pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Look up the instance method for
/// \p Sel in the global pool, warning if there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance=*/true);
}
/// LookupFactoryMethodInGlobalPool - Look up the factory method for
/// \p Sel in the global pool, warning if there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance=*/false);
}
/// Suggest an existing method whose selector is close to \p Sel for typo
/// correction purposes.
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// Lightweight wrapper around a fully-formed expression, produced by
/// Sema::MakeFullExpr and related helpers.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) {}
  FullExprArg(Sema &actions) : E(nullptr) {}

  /// Hand the wrapped expression back as an ExprResult.
  ExprResult release() { return E; }

  Expr *get() const { return E; }
  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};
/// Wrap \p Arg as a FullExprArg, using the expression's own location (or an
/// invalid location when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return MakeFullExpr(Arg, Loc);
}
/// Finish \p Arg as a full-expression at \p CC and wrap the result.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  ExprResult Finished = ActOnFinishFullExpr(Arg, CC, /*DiscardedValue=*/false);
  return FullExprArg(Finished.get());
}
/// Finish \p Arg as a discarded-value full-expression and wrap the result.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return FullExprArg(
      ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue=*/true).get());
}
/// Build a statement from an expression.
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
// Compound-statement scope bracketing; see also CompoundScopeRAII.
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
/// An RAII helper that pops a function scope on exit, unless disable() has
/// been called.
struct FunctionScopeRAII {
  Sema &S;
  bool Active = true;

  FunctionScopeRAII(Sema &S) : S(S) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }

  /// Keep the function scope alive past this object's lifetime.
  void disable() { Active = false; }
};
/// Build a declaration statement covering [StartLoc, EndLoc].
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// Controls how much work ActOnCXXForRangeStmt/BuildCXXForRangeStmt do.
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
/// Bit flags selecting which entities are eligible for copy elision /
/// implicit move; combined in the CES_* presets below.
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
/// Find the variable (if any) that is a copy-elision candidate for a
/// return of \p E under the semantics selected by \p CESK.
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
/// Build a GCC-style inline assembly statement.
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
/// Build a Microsoft-style (token-based) inline assembly statement.
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
// Objective-C \@try / \@catch / \@finally / \@throw / \@synchronized.
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
// C++ try/catch and SEH __try/__except/__finally handling.
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
/// Begin parsing a declaration, routing delayed diagnostics into \p pool.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  ParsingDeclState State = DelayedDiagnostics.push(pool);
  return State;
}
/// Finish parsing a declaration started with PushParsingDeclaration.
// NOTE(review): presumably this processes the diagnostics delayed for
// \p decl — confirm against the definition in SemaDeclAttr.cpp.
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
/// Saved diagnostics state for a class being parsed; see PushParsingClass.
typedef ProcessingContextState ParsingClassState;
/// Push an undelayed-diagnostics context for parsing a class body.
ParsingClassState PushParsingClass() {
  ParsingClassState Saved = DelayedDiagnostics.pushUndelayed();
  return Saved;
}
/// Restore the diagnostics context saved by PushParsingClass.
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
/// Tag type used to select the PushExpressionEvaluationContext overload
/// (below) that reuses the enclosing context's lambda context declaration
/// instead of taking a new one.
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
/// The ways a variable capture can be requested (see tryCaptureVariable):
/// implicit (for either a block or a lambda), or explicit by-value /
/// by-reference (lambda capture-list syntax).
enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
  Scope *S;          // Scope in which the member access was written.
  UnqualifiedId &Id; // The member name as written by the user.
  Decl *ObjCImpDecl; // ObjC implementation decl, forwarded on reinvocation.
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
/// Argument orderings accepted by BuildAtomicExpr: the builtin's source/API
/// order, or the order in which the arguments are stored in the AST node.
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
/// One component of the member designator in a __builtin_offsetof
/// expression: either a field access (.ident) or an array subscript
/// ([expr]), as selected by isBrackets.
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd; // Source range of this component.
  bool isBrackets; // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo; // Field name; active when !isBrackets.
    Expr *E;                   // Index expression; active when isBrackets.
  } U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
/// (Returned by the CheckMicrosoftIfExistsSymbol overloads below, which
/// implement the Microsoft __if_exists / __if_not_exists extension.)
enum IfExistsResult {
  /// The symbol exists.
  IER_Exists,

  /// The symbol does not exist.
  IER_DoesNotExist,

  /// The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,

  /// An error occurred.
  IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  // Canonical types already recorded — presumably used to dedup
  // Exceptions; confirm against the CalledDecl implementation.
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  // The exception types collected so far, in order of discovery.
  SmallVector<QualType, 4> Exceptions;

  // Drop every exception type collected so far.
  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    // Before C++11 there is no noexcept; start from an empty dynamic
    // specification (throw()) instead.
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      // Dynamic specification: expose the collected exception types.
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                   tok::kw_false).get();
    }
    return ESI;
  }
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  // Saved value of S.CXXThisTypeOverride, restored by the destructor.
  QualType OldCXXThisTypeOverride;
  // Whether this scope actually changed the override (when false, the
  // destructor has nothing to undo).
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
/// Passed to FindAllocationFunctions to control where 'operator new' and
/// 'operator delete' candidates are looked up.
enum AllocationFunctionScope {
  /// Only look for allocation functions in the global scope.
  AFS_Global,
  /// Only look for allocation functions in the scope of the
  /// allocated class.
  AFS_Class,
  /// Look for allocation functions in both the global scope
  /// and in the scope of the allocated class.
  AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Finish a full-expression, defaulting the completion location to the
/// expression's own location (or an invalid location for a null expression).
/// \param Expr The full-expression being completed; may be null.
/// \param DiscardedValue Whether the expression's value is discarded.
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  SourceLocation CompletionLoc;
  if (Expr)
    CompletionLoc = Expr->getExprLoc();
  return ActOnFinishFullExpr(Expr, CompletionLoc, DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
  /// The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;

  /// The identifier preceding the '::'.
  IdentifierInfo *Identifier;

  /// The location of the identifier.
  SourceLocation IdentifierLoc;

  /// The location of the '::'.
  SourceLocation CCLoc;

  /// Creates info object for the most typical case.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc,
                     ParsedType ObjectType = ParsedType())
      : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
        CCLoc(ColonColonLoc) {
  }

  /// Variant taking the object type as a QualType, wrapped into a
  /// ParsedType for storage.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
      : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
        IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
  }
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
/// \returns the deduced capture type, wrapped as a ParsedType for the parser.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Every init kind other than plain copy-init ([x = e]) is direct-init.
  const bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  QualType CaptureType = buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, /*NumExpansions=*/None, Id, DirectInit, Init);
  return ParsedType::make(CaptureType);
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, and false is returned.
bool CheckConstraintExpression(Expr *CE);
bool CalculateConstraintSatisfaction(ConceptDecl *NamedConcept,
MultiLevelTemplateArgumentList &MLTAL,
Expr *ConstraintExpr,
bool &IsSatisfied);
/// Check that the associated constraints of a template declaration match the
/// associated constraints of an older declaration of which it is a
/// redeclaration.
bool CheckRedeclarationConstraintMatch(TemplateParameterList *Old,
TemplateParameterList *New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//

/// ActOnStartLinkageSpecification - Parsed the beginning of a C++ linkage
/// specification (e.g. 'extern "C"').  \p LangStr is the language
/// string-literal expression and \p LBraceLoc is the location of the '{'
/// when the braced form is used.
Decl *ActOnStartLinkageSpecification(Scope *S,
                                     SourceLocation ExternLoc,
                                     Expr *LangStr,
                                     SourceLocation LBraceLoc);
/// ActOnFinishLinkageSpecification - Complete the linkage specification
/// started by ActOnStartLinkageSpecification.  \p RBraceLoc is the
/// location of the closing '}' for the braced form.
Decl *ActOnFinishLinkageSpecification(Scope *S,
                                      Decl *LinkageSpec,
                                      SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(FunctionDecl *MD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// The outcome of a C++ access-control check.
enum AccessResult {
  /// The entity is accessible in this context.
  AR_accessible,
  /// The entity is not accessible in this context.
  AR_inaccessible,
  /// Accessibility cannot be determined yet (e.g. it depends on a
  /// dependent context); no diagnostic is issued now.
  AR_dependent,
  /// The check has been deferred and will be performed later
  /// (see HandleDelayedAccessCheck).
  AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Describes the context in which an abstract class type was used, for
/// diagnosing uses of abstract types (see DiagnoseAbstractType /
/// RequireNonAbstractType).  AbstractNone (-1) means no specific context.
/// NOTE(review): the values appear to serve as %select indices in the
/// corresponding diagnostics — confirm against the diagnostic definitions.
enum AbstractDiagSelID {
  AbstractNone = -1,
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Require that \p T not be an abstract class type at \p Loc.
///
/// Convenience overload: wraps \p DiagID and its format arguments in a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser-based overload,
/// returning its result.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> TypeDiag(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, TypeDiag);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Describes whether (and why) a name is assumed to be a template name;
/// reported through LookupTemplateName's AssumedTemplateKind out-parameter.
enum class AssumedTemplateKind {
  /// This is not assumed to be a template name.
  None,
  /// This is assumed to be a template name because lookup found nothing.
  FoundNothing,
  /// This is assumed to be a template name because lookup found one or more
  /// functions (but no function templates).
  FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
  /// Parameter list of a class template.
  TPC_ClassTemplate,
  /// Parameter list of a variable template.
  TPC_VarTemplate,
  /// Parameter list of a function template.
  TPC_FunctionTemplate,
  /// Parameter list of a member of a class template.
  TPC_ClassTemplateMember,
  /// Parameter list of a friend class template declaration.
  TPC_FriendClassTemplate,
  /// Parameter list of a friend function template declaration.
  TPC_FriendFunctionTemplate,
  /// Parameter list of a friend function template that has a definition.
  TPC_FriendFunctionTemplateDefinition,
  /// Parameter list of an alias template.
  TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
SourceLocation ConceptNameLoc, NamedDecl *FoundDecl,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
  /// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
///
/// Passed to \c TemplateParameterListsAreEqual to select how strictly
/// two template parameter lists must correspond.
enum TemplateParameterListEqualKind {
  /// We are matching the template parameter lists of two templates
  /// that might be redeclarations.
  ///
  /// \code
  /// template<typename T> struct X;
  /// template<typename T> struct X;
  /// \endcode
  TPL_TemplateMatch,

  /// We are matching the template parameter lists of two template
  /// template parameters as part of matching the template parameter lists
  /// of two templates that might be redeclarations.
  ///
  /// \code
  /// template<template<int I> class TT> struct X;
  /// template<template<int Value> class Other> struct X;
  /// \endcode
  TPL_TemplateTemplateParmMatch,

  /// We are matching the template parameter lists of a template
  /// template argument against the template parameter lists of a template
  /// template parameter.
  ///
  /// \code
  /// template<template<int Value> class Metafun> struct X;
  /// template<int Value> struct integer_c;
  /// X<integer_c> xic;
  /// \endcode
  TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
// Concepts
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
  /// An arbitrary expression.
  UPPC_Expression = 0,
  /// The base type of a class type.
  UPPC_BaseType,
  /// The type of an arbitrary declaration.
  UPPC_DeclarationType,
  /// The type of a data member.
  UPPC_DataMemberType,
  /// The size of a bit-field.
  UPPC_BitFieldWidth,
  /// The expression in a static assertion.
  UPPC_StaticAssertExpression,
  /// The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,
  /// The enumerator value.
  UPPC_EnumeratorValue,
  /// A using declaration.
  UPPC_UsingDeclaration,
  /// A friend declaration.
  UPPC_FriendDeclaration,
  /// A declaration qualifier.
  UPPC_DeclarationQualifier,
  /// An initializer.
  UPPC_Initializer,
  /// A default argument.
  UPPC_DefaultArgument,
  /// The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,
  /// The type of an exception.
  UPPC_ExceptionType,
  /// Partial specialization.
  UPPC_PartialSpecialization,
  /// Microsoft __if_exists.
  UPPC_IfExists,
  /// Microsoft __if_not_exists.
  UPPC_IfNotExists,
  /// Lambda expression.
  UPPC_Lambda,
  /// Block expression.
  UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,
  /// The declaration was invalid; do nothing.
  TDK_Invalid,
  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,
  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,
  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,
  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,
  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,
  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
  OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                  unsigned ArgIdx, QualType OriginalArgType)
      : OriginalParamType(OriginalParamType),
        DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
        OriginalArgType(OriginalArgType) {}

  /// The parameter type against which deduction was performed.
  QualType OriginalParamType;
  /// Whether the parameter was decomposed before deduction was performed
  /// -- NOTE(review): semantics inferred from the name; confirm against
  /// the deduction implementation.
  bool DecomposedParam;
  /// The index of the argument within the call.
  unsigned ArgIdx;
  /// The original type of the call argument.
  QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
  /// Deduction succeeded.
  DAR_Succeeded,
  /// Deduction failed.
  DAR_Failed,
  /// Deduction failed, and a diagnostic has already been issued.
  DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// Convenience overload of the static \c MarkDeducedTemplateParameters
/// below, supplying this semantic-analysis object's ASTContext.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  // Delegate; no need to `return` a void expression.
  MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
  /// The kind of template instantiation we are performing.
  enum SynthesisKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
    TemplateInstantiation,

    /// We are instantiating a default argument for a template
    /// parameter. The Entity is the template parameter whose argument is
    /// being instantiated, the Template is the template, and the
    /// TemplateArgs/NumTemplateArguments provide the template arguments as
    /// specified.
    DefaultTemplateArgumentInstantiation,

    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template argument determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are computing the exception specification for a defaulted special
    /// member function.
    ExceptionSpecEvaluation,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,

    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,

    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,

    /// We are checking the constraints associated with a constrained entity
    /// or the constraint expression of a concept. This includes the checks
    /// that atomic constraints have the type 'bool' and that they can be
    /// constant evaluated.
    ConstraintsCheck,

    /// We are substituting template arguments into a constraint expression.
    ConstraintSubstitution,

    /// We are rewriting a comparison operator in terms of an operator<=>.
    RewritingOperatorAsSpaceship,

    /// Added for Template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
  } Kind;

  /// Was the enclosing context a non-instantiation SFINAE context?
  bool SavedInNonInstantiationSFINAEContext;

  /// The point of instantiation or synthesis within the source code.
  SourceLocation PointOfInstantiation;

  /// The entity that is being synthesized.
  Decl *Entity;

  /// The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  // FIXME: Wrap this union around more members, or perhaps store the
  // kind-specific members in the RAII object owning the context.
  union {
    /// The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// The special member being declared or defined.
    CXXSpecialMember SpecialMember;
  };

  /// The template arguments being substituted. Not valid when declaring a
  /// special member, since that context stores \c SpecialMember in the
  /// union instead of \c NumTemplateArgs.
  ArrayRef<TemplateArgument> template_arguments() const {
    assert(Kind != DeclaringSpecialMember);
    return {TemplateArgs, NumTemplateArgs};
  }

  /// The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that caused
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  CodeSynthesisContext()
      : Kind(TemplateInstantiation),
        SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
        Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
        DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
/// The Sema instance whose substitution index is temporarily overridden.
Sema &Self;
/// The index in effect before this RAII object was constructed; restored
/// by the destructor.
int OldSubstitutionIndex;
public:
/// Save \c Self.ArgumentPackSubstitutionIndex and install
/// \p NewSubstitutionIndex in its place.
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
/// Restore the previously saved substitution index.
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
/// Tag type used to select the exception-specification constructor below.
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Tag type used to select the constraints-check constructor below.
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Tag type used to select the constraint-substitution constructor below.
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, TemplateDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
/// May be called explicitly before destruction; the destructor also calls it.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
/// True if constructing this object failed (e.g. the instantiation depth
/// limit was exceeded); queried via isInvalid().
bool Invalid;
/// True if the same specialization is already on the active-instantiation
/// stack; queried via isAlreadyInstantiating().
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
/// Common implementation that the public constructors delegate to.
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
// Non-copyable: each object corresponds to exactly one stack entry.
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  // Any synthesis-context entry beyond the non-instantiation prefix means
  // a real template instantiation is in progress.
  const auto ActiveEntries = CodeSynthesisContexts.size();
  return ActiveEntries > NonInstantiationEntries;
}
/// Print the current code-synthesis context stack if it has changed since
/// it was last emitted, then print the pragma-attribute instantiation
/// point when one is active.
void PrintContextStack() {
  const auto Depth = CodeSynthesisContexts.size();
  // Re-print only when the stack is non-empty and differs from what was
  // emitted last, to avoid redundant context stacks on repeated diagnostics.
  if (Depth != 0 && Depth != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = Depth;
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  const auto &CurrentContext = ExprEvalContexts.back();
  return CurrentContext.isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
/// Saved Sema state, restored by the destructor.
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
/// Snapshot the SFINAE-related Sema state; if we are not already in a
/// SFINAE context, mark that we are in a non-instantiation SFINAE context.
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
/// Restore the snapshotted state, discarding any SFINAE errors that
/// occurred while the trap was active.
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
/// Saved typo-correction flag, restored by the destructor.
bool PrevDisableTypoCorrection;
public:
/// Enter tentative analysis: trap SFINAE-style errors and disable typo
/// correction for the lifetime of this object.
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
/// Restore the previous typo-correction setting (the SFINAETrap member
/// restores the rest in its own destructor).
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
/// When \p Enabled, stash the current pending instantiations and vtable
/// uses so this scope starts with empty queues; they are restored by the
/// destructor. When not enabled, this scope is a no-op.
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
/// Perform the instantiations and vtable definitions queued within this
/// scope. Must be called before destruction (the destructor asserts the
/// queues are empty).
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
/// Queues saved out of Sema on entry and swapped back on exit.
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
/// Stash the current queue of pending local implicit instantiations so
/// this scope starts with an empty queue; restored by the destructor.
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
/// Perform the local instantiations queued within this scope. Must be
/// called before destruction (the destructor asserts the queue is empty).
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
/// Queue saved out of Sema on entry and swapped back on exit.
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  /// The infos collected so far, indexed by parameter position.
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  /// True once any non-default info has been recorded; if this stays
  /// false, getPointerOrNull() returns nullptr.
  bool HasInteresting = false;
public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  /// Indices must be set in strictly increasing order.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    // Default-fill any skipped slots, then record this entry.
    Infos.resize(index + 1);
    Infos[index] = info;
    HasInteresting |= (info != FunctionProtoType::ExtParameterInfo());
  }
  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up, or nullptr if every entry is
  /// the default.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (HasInteresting) {
      Infos.resize(numParams);
      return Infos.data();
    }
    return nullptr;
  }
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// An attribute from a template pattern whose instantiation onto a new
/// declaration has been deferred, together with the scope needed to
/// instantiate it later.
struct LateInstantiatedAttribute {
/// The attribute on the template pattern.
const Attr *TmplAttr;
/// The local instantiation scope to use when instantiating the attribute.
LocalInstantiationScope *Scope;
/// The instantiated declaration the attribute will be attached to.
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// The kind of Objective-C container (interface, protocol, category,
/// etc.) currently being processed; OCK_None (-1) means no container.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Classification of Objective-C methods with special naming conventions
/// (the alloc/new/copy/init families), or OSMK_None for ordinary methods.
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
/// Parsed information about a single Objective-C method argument, as
/// passed to ActOnMethodDeclaration.
struct ObjCArgInfo {
/// The argument name and its location.
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
/// Classify a message send that starts with the identifier \p Name.
/// For a class message, \p ReceiverType is set to the receiver's type.
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
/// Parser callback for a message send to 'super': [super sel ...].
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
/// Build an Objective-C class message send to the given receiver type.
/// A valid SuperLoc indicates a class message to 'super'.
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
/// Build an implicit (compiler-generated) class message send.
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
/// Parser callback for a class message send: [ClassName sel ...].
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
/// Build an Objective-C instance message send to the given receiver
/// expression. A valid SuperLoc indicates an instance message to 'super'.
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
/// Build an implicit (compiler-generated) instance message send.
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
/// Parser callback for an instance message send: [expr sel ...].
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
/// Build an ARC bridged cast, e.g. (__bridge T)expr.
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
/// Parser callback for an ARC bridged cast.
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
/// Check a cast between toll-free-bridged CF/Objective-C types and emit
/// any needed diagnostics.
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
/// Check a cast involving types related via the objc_bridge_related
/// attribute and emit any needed diagnostics.
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
/// Check a static-style toll-free bridge cast; \p Kind receives the cast
/// kind to use.
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
/// Find the class and conversion methods named by an objc_bridge_related
/// attribute relating \p SrcType and \p DestType. \p CfToNs selects the
/// conversion direction (CF type to Objective-C type or back).
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
/// Check (and possibly rewrite \p SrcExpr for) a conversion between types
/// related via objc_bridge_related.
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
/// Check a conversion to an Objective-C string literal type
/// (e.g. a "string" literal used where an NSString is expected).
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
/// Check a method in the 'init' family for validity.
/// \p receiverTypeIfCall is the receiver type when checking a call site;
/// presumably it is null when checking the declaration itself — confirm.
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
/// Check a direct method against the method it overrides and emit any
/// needed diagnostics.
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
/// Check \p ObjCMethod against all the methods it overrides, using
/// \p RTC for result-type compatibility information.
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
/// Alignment kinds accepted by '\#pragma options align'.
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// The situations in which a non-default '\#pragma pack' state is
/// diagnosed.
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
/// Diagnose a non-default '\#pragma pack' alignment state observed at an
/// include boundary or at end of translation unit.
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
/// Diagnose '\#pragma pack(push, ...)' with no matching pop at end of
/// translation unit.
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
/// The section families managed by the MS segment pragmas
/// (\#pragma data_seg/bss_seg/const_seg/code_seg).
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
/// Check that \p SectionName is used consistently with the same
/// \p SectionFlags; this overload diagnoses against the declaration.
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
/// Check that \p SectionName is used consistently with the same
/// \p SectionFlags; this overload diagnoses against a pragma location.
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
/// Clone the declaration \p ND under the name \p II for use by
/// '\#pragma weak'.
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
/// Apply the weak-import information \p W to the declaration \p ND.
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// Called on well-formed '\#pragma clang attribute push' that carries an
/// attribute and its subject match rules.
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
/// Called on well-formed '\#pragma clang attribute push' with no attribute
/// (an empty push).
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
/// Diagnose '\#pragma clang attribute push' with no matching pop at end
/// of translation unit.
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
/// AddParameterABIAttr - Adds a parameter-ABI attribute (e.g. swiftcall
/// parameter conventions) to a particular declaration.
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
/// The retain/release ownership family an attribute belongs to
/// (NS for Objective-C, CF for CoreFoundation, OS for libkern).
enum class RetainOwnershipKind {NS, CF, OS};
/// AddXConsumedAttr - Adds an ns/cf/os_consumed attribute of kind \p K to
/// a particular declaration.
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// Check that a type is valid as the return type of an ns_returns_retained
/// function or method.
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
/// Set up the coroutine scaffolding (promise etc.) when the first coroutine
/// keyword \p Keyword is seen in the current function body.
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
/// Parser callback for a 'co_await' expression.
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
/// Parser callback for a 'co_yield' expression.
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
/// Parser callback for a 'co_return' statement.
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
/// Build a co_await whose awaiter operand has already been resolved.
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
/// Build a co_await whose operator co_await lookup is still dependent.
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
/// Build a co_yield expression.
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
/// Build a co_return statement.
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
/// Build the CoroutineBodyStmt wrapping a completed coroutine body.
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
/// Build the moves of coroutine parameters into the coroutine frame.
bool buildCoroutineParameterMoves(SourceLocation Loc);
/// Build the promise variable for the current coroutine.
VarDecl *buildCoroutinePromise(SourceLocation Loc);
/// Finalize \p Body once the coroutine function \p FD is fully parsed.
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
/// Look up the std coroutine_traits template for the current coroutine.
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
// The OpenCL extension currently enabled by '#pragma OPENCL EXTENSION';
// empty when no extension is active.
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
/// Return the OpenCL extension currently enabled by pragma, or an empty
/// string if none.
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
/// Record \p Ext as the OpenCL extension currently enabled by pragma.
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
/// Return true if \p FD requires an OpenCL extension that is currently
/// disabled.
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
// Opaque stack of data-sharing attributes for the enclosing OpenMP
// regions; managed by Init/DestroyDataSharingAttributesStack.
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
/// Destruction of the data-sharing attributes stack.
void DestroyDataSharingAttributesStack();
/// Verify that \p Op is an integer constant expression valid for clause
/// \p CKind (strictly positive unless \p StrictlyPositive is false).
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Struct to store the context selectors info for declare variant directive.
using OMPCtxStringType = SmallString<8>;
using OMPCtxSelectorData =
OpenMPCtxSelectorData<SmallVector<OMPCtxStringType, 4>, ExprResult>;
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
/// Build a reference expression for the captured variable \p Capture with
/// the given value kind, object kind and location.
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
/// Perform the implicit integer conversion OpenMP requires for certain
/// clause expressions (e.g. loop bounds and schedule chunk sizes).
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
/// Generic entry point for a parsed executable OpenMP directive of kind
/// \p Kind: builds the directive node from the parsed clauses and the
/// associated statement \p AStmt.
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Map from a variable to the expression that gave it an implicitly
/// inherited data-sharing attribute.
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(
DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p FD.
/// \param Data Set of context-specific data for the specified context
/// selector.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
SourceRange SR,
ArrayRef<OMPCtxSelectorData> Data);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
  /// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy. This returns true if the
  /// conversion was invalid or false if the conversion was accepted.
  bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                                SourceLocation Loc,
                                QualType DstType, QualType SrcType,
                                Expr *SrcExpr, AssignmentAction Action,
                                bool *Complained = nullptr);
  /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
  /// enum. If AllowMask is true, then we also allow the complement of a valid
  /// value, to be used as a mask.
  bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                         bool AllowMask) const;
  /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
  /// integer not in the range of enum values.
  void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                              Expr *SrcExpr);
  /// CheckAssignmentConstraints - Perform type checking for assignment,
  /// argument passing, variable initialization, and function return values.
  /// C99 6.5.16.
  AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                               QualType LHSType,
                                               QualType RHSType);
  /// Check assignment constraints and optionally prepare for a conversion of
  /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
  /// is true.
  AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                               ExprResult &RHS,
                                               CastKind &Kind,
                                               bool ConvertRHS = true);
  /// Check assignment constraints for an assignment of RHS to LHSType.
  ///
  /// \param LHSType The destination type for the assignment.
  /// \param RHS The source expression for the assignment.
  /// \param Diagnose If \c true, diagnostics may be produced when checking
  ///        for assignability. If a diagnostic is produced, \p RHS will be
  ///        set to ExprError(). Note that this function may still return
  ///        without producing a diagnostic, even for an invalid assignment.
  /// \param DiagnoseCFAudited If \c true, the target is a function parameter
  ///        in an audited Core Foundation API and does not need to be checked
  ///        for ARC retain issues.
  /// \param ConvertRHS If \c true, \p RHS will be updated to model the
  ///        conversions necessary to perform the assignment. If \c false,
  ///        \p Diagnose must also be \c false.
  AssignConvertType CheckSingleAssignmentConstraints(
      QualType LHSType, ExprResult &RHS, bool Diagnose = true,
      bool DiagnoseCFAudited = false, bool ConvertRHS = true);
  // If the lhs type is a transparent union, check whether we
  // can initialize the transparent union with the given expression.
  AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                             ExprResult &RHS);
  /// Determine whether \p From is a string literal being converted to a
  /// non-const pointer type \p ToType.
  bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
  /// Check whether converting \p From to \p ToType is permitted given the
  /// exception specifications involved.
  bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
  /// PerformImplicitConversion - Perform an implicit conversion of the
  /// expression From to the type ToType as part of the given assignment
  /// action. The overloads below differ in how the implicit conversion
  /// sequence is obtained or supplied.
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit = false);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit,
                                       ImplicitConversionSequence& ICS);
  /// Overload applying a previously-computed implicit conversion sequence.
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       const ImplicitConversionSequence& ICS,
                                       AssignmentAction Action,
                                       CheckedConversionKind CCK
                                          = CCK_ImplicitConversion);
  /// Overload applying a previously-computed standard conversion sequence.
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       const StandardConversionSequence& SCS,
                                       AssignmentAction Action,
                                       CheckedConversionKind CCK);
  /// Perform a qualification conversion on \p E to the type \p Ty.
  ExprResult PerformQualificationConversion(
      Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
      CheckedConversionKind CCK = CCK_ImplicitConversion);
  /// The following "Check" methods will return a valid/converted QualType
  /// or a null QualType (indicating an error diagnostic was issued).
  /// Type checking binary operators (subroutines of CreateBuiltinBinOp).
  QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                           ExprResult &RHS);
  QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
                                        ExprResult &RHS);
  QualType CheckPointerToMemberOperands( // C++ 5.5
      ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
      SourceLocation OpLoc, bool isIndirect);
  QualType CheckMultiplyDivideOperands( // C99 6.5.5
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
      bool IsDivide);
  QualType CheckRemainderOperands( // C99 6.5.5
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      bool IsCompAssign = false);
  QualType CheckAdditionOperands( // C99 6.5.6
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
  QualType CheckSubtractionOperands( // C99 6.5.6
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      QualType* CompLHSTy = nullptr);
  QualType CheckShiftOperands( // C99 6.5.7
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc, bool IsCompAssign = false);
  /// Warn about comparing a pointer against the null character constant '\0'.
  void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
  QualType CheckCompareOperands( // C99 6.5.8/9
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc);
  QualType CheckBitwiseOperands( // C99 6.5.[10...12]
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc);
  QualType CheckLogicalOperands( // C99 6.5.[13,14]
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc);
  // CheckAssignmentOperands is used for both simple and compound assignment.
  // For simple assignment, pass both expressions and a null converted type.
  // For compound assignment, pass both expressions and the converted type.
  QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
      Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
  // Checking and rebuilding of pseudo-object expressions.
  ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                     UnaryOperatorKind Opcode, Expr *Op);
  ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                         BinaryOperatorKind Opcode,
                                         Expr *LHS, Expr *RHS);
  ExprResult checkPseudoObjectRValue(Expr *E);
  /// Rebuild the syntactic form of a pseudo-object expression.
  Expr *recreateSyntacticForm(PseudoObjectExpr *E);
  QualType CheckConditionalOperands( // C99 6.5.15
      ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
      ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
  QualType CXXCheckConditionalOperands( // C++ 5.16
      ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
      ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
  /// Find a composite pointer type for \p E1 and \p E2, converting the
  /// expressions to that type when \p ConvertArgs is true.
  QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                    bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
  /// Find a composite Objective-C pointer type for the two operands of a
  /// conditional expression.
  QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                        SourceLocation QuestionLoc);
  /// Diagnose a conditional operator where one operand is a null pointer
  /// constant.
  bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                  SourceLocation QuestionLoc);
  /// Warn about comparing the pointer \p E, known to be non-null, against a
  /// null pointer constant of kind \p NullType (equality if \p IsEqual).
  void DiagnoseAlwaysNonNullPointer(Expr *E,
                                    Expr::NullPointerConstantKind NullType,
                                    bool IsEqual, SourceRange Range);
  /// type checking for vector binary operators.
  QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                               SourceLocation Loc, bool IsCompAssign,
                               bool AllowBothBool, bool AllowBoolConversion);
  /// Return the signed vector type corresponding to \p V.
  QualType GetSignedVectorType(QualType V);
  QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc,
                                      BinaryOperatorKind Opc);
  QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc);
  /// Determine whether \p srcType and \p destType are compatible under the
  /// lax vector conversion rules, and whether an actual conversion between
  /// them would be a lax vector conversion.
  bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
  bool isLaxVectorConversion(QualType srcType, QualType destType);
  /// type checking declaration initializers (C99 6.7.8)
  bool CheckForConstantInitializer(Expr *e, QualType t);
  // type checking C++ declaration initializers (C++ [dcl.init]).
  /// ReferenceCompareResult - Expresses the result of comparing two
  /// types (cv1 T1 and cv2 T2) to determine their compatibility for the
  /// purposes of initialization by reference (C++ [dcl.init.ref]p4).
  enum ReferenceCompareResult {
    /// Ref_Incompatible - The two types are incompatible, so direct
    /// reference binding is not possible.
    Ref_Incompatible = 0,
    /// Ref_Related - The two types are reference-related, which means
    /// that their unqualified forms (T1 and T2) are either the same
    /// or T1 is a base class of T2.
    Ref_Related,
    /// Ref_Compatible - The two types are reference-compatible.
    Ref_Compatible
  };
  /// Compare the types cv1 T1 and cv2 T2 to determine their compatibility
  /// for reference binding (see ReferenceCompareResult above); the bool
  /// out-parameters report which kinds of conversion are involved.
  ReferenceCompareResult
  CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                               bool &DerivedToBase, bool &ObjCConversion,
                               bool &ObjCLifetimeConversion,
                               bool &FunctionConversion);
  /// Check a cast applied to an expression of __unknown_anytype.
  ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                                 Expr *CastExpr, CastKind &CastKind,
                                 ExprValueKind &VK, CXXCastPath &Path);
  /// Force an expression with unknown-type to an expression of the
  /// given type.
  ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
  // CheckVectorCast - check type constraints for vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size.
  // returns true if the cast is invalid
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);
  /// Prepare `SplattedExpr` for a vector splat operation, adding
  /// implicit casts if necessary.
  ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size,
  // or vectors and the element type of that vector.
  // returns the cast expr
  ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                                CastKind &Kind);
  /// Build a C++ functional-style cast (e.g. T(expr)).
  ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                        SourceLocation LParenLoc,
                                        Expr *CastExpr,
                                        SourceLocation RParenLoc);
  /// Possible outcomes of checking an Objective-C ARC conversion.
  enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
  /// Checks for invalid conversions and casts between
  /// retainable pointers and other pointer kinds for ARC and Weak.
  ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                          QualType castType, Expr *&op,
                                          CheckedConversionKind CCK,
                                          bool Diagnose = true,
                                          bool DiagnoseCFAudited = false,
                                          BinaryOperatorKind Opc = BO_PtrMemD
                                          );
  /// Strip an ARC "unbridged cast" from \p e, or diagnose one.
  Expr *stripARCUnbridgedCast(Expr *e);
  void diagnoseARCUnbridgedCast(Expr *e);
  /// Determine whether a conversion to \p castType from the weak-unavailable
  /// type \p ExprType is permitted under ARC.
  bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                             QualType ExprType);
  /// checkRetainCycles - Check whether an Objective-C message send
  /// might create an obvious retain cycle.
  void checkRetainCycles(ObjCMessageExpr *msg);
  void checkRetainCycles(Expr *receiver, Expr *argument);
  void checkRetainCycles(VarDecl *Var, Expr *Init);
  /// checkUnsafeAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained type.
  bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
  /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained expression.
  void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
  /// CheckMessageArgumentTypes - Check types in an Obj-C message send.
  /// \param Method - May be null.
  /// \param [out] ReturnType - The return type of the send.
  /// \return true iff there were any incompatible types.
  bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
                                 MultiExprArg Args, Selector Sel,
                                 ArrayRef<SourceLocation> SelectorLocs,
                                 ObjCMethodDecl *Method, bool isClassMessage,
                                 bool isSuperMessage, SourceLocation lbrac,
                                 SourceLocation rbrac, SourceRange RecRange,
                                 QualType &ReturnType, ExprValueKind &VK);
  /// Determine the result of a message send expression based on
  /// the type of the receiver, the method expected to receive the message,
  /// and the form of the message send.
  QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
                                    ObjCMethodDecl *Method, bool isClassMessage,
                                    bool isSuperMessage);
  /// If the given expression involves a message send to a method
  /// with a related result type, emit a note describing what happened.
  void EmitRelatedResultTypeNote(const Expr *E);
  /// Given that we had incompatible pointer types in a return
  /// statement, check whether we're in a method with a related result
  /// type, and if so, emit a note describing what happened.
  void EmitRelatedResultTypeNoteForReturn(QualType destType);
  /// The result of checking a statement condition: the (possibly null)
  /// condition variable, the condition expression, and — for constexpr
  /// conditions with a non-value-dependent expression — its known value.
  class ConditionResult {
    Decl *ConditionVar;    // Condition variable declaration, if any.
    FullExprArg Condition; // The condition expression itself.
    bool Invalid;          // True if the condition could not be formed.
    bool HasKnownValue;    // True if KnownValue below is meaningful.
    bool KnownValue;       // Constant-evaluated value of the condition.
    friend class Sema;
    // Built by Sema: the value is constant-evaluated only for constexpr
    // conditions whose expression is not value-dependent.
    ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                    bool IsConstexpr)
        : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
          HasKnownValue(IsConstexpr && Condition.get() &&
                        !Condition.get()->isValueDependent()),
          KnownValue(HasKnownValue &&
                     !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
    // Error form: everything null, Invalid as given.
    explicit ConditionResult(bool Invalid)
        : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
          HasKnownValue(false), KnownValue(false) {}
  public:
    ConditionResult() : ConditionResult(false) {}
    bool isInvalid() const { return Invalid; }
    /// Retrieve the condition variable (may be null) and expression.
    std::pair<VarDecl *, Expr *> get() const {
      return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                            Condition.get());
    }
    /// Return the constant value of the condition, if known; None otherwise.
    llvm::Optional<bool> getKnownValue() const {
      if (!HasKnownValue)
        return None;
      return KnownValue;
    }
  };
  /// Produce an invalid ConditionResult, signalling a condition error.
  static ConditionResult ConditionError() { return ConditionResult(true); }
  enum class ConditionKind {
    Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
    ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
    Switch       ///< An integral condition for a 'switch' statement.
  };
  /// Check and package the condition expression of a statement of kind \p CK.
  ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                                 Expr *SubExpr, ConditionKind CK);
  /// Check a condition introduced by a declared condition variable.
  ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                         SourceLocation StmtLoc,
                                         ConditionKind CK);
  DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
  ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                    SourceLocation StmtLoc,
                                    ConditionKind CK);
  ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
  /// CheckBooleanCondition - Diagnose problems involving the use of
  /// the given expression as a boolean condition (e.g. in an if
  /// statement). Also performs the standard function and array
  /// decays, possibly changing the input variable.
  ///
  /// \param Loc - A location associated with the condition, e.g. the
  /// 'if' keyword.
  /// \return the converted condition, invalid if there were any errors.
  ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                   bool IsConstexpr = false);
  /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
  /// found in an explicit(bool) specifier.
  ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
  /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
  /// Returns true if the explicit specifier is now resolved.
  bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
  /// DiagnoseAssignmentAsCondition - Given that an expression is
  /// being used as a boolean condition, warn if it's an assignment.
  void DiagnoseAssignmentAsCondition(Expr *E);
  /// Redundant parentheses over an equality comparison can indicate
  /// that the user intended an assignment used as condition.
  void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
  /// CheckCXXBooleanCondition - Convert the condition expression to bool;
  /// the returned ExprResult is invalid if the conversion is not possible.
  ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
  /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
  /// the specified width and sign. If an overflow occurs, detect it and emit
  /// the specified diagnostic.
  void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                          unsigned NewWidth, bool NewSign,
                                          SourceLocation Loc, unsigned DiagID);
  /// Checks that the Objective-C declaration is declared in the global scope.
  /// Emits an error and marks the declaration as invalid if it's not declared
  /// in the global scope.
  bool CheckObjCDeclScope(Decl *D);
  /// Abstract diagnoser used by VerifyIntegerConstantExpression (below) to
  /// report integer-constant-expression violations.
  class VerifyICEDiagnoser {
  public:
    // When true, suppress the diagnostics.
    bool Suppress;
    VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
    /// Diagnose an expression that is not an integer constant expression.
    virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
    /// Diagnose an expression that had to be constant-folded (i.e., is not a
    /// strict ICE). Has a default implementation.
    virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
    virtual ~VerifyICEDiagnoser() { }
  };
  /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
  /// and reports the appropriate diagnostics. The returned ExprResult is
  /// invalid on failure.
  /// Can optionally return the value of the expression.
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             VerifyICEDiagnoser &Diagnoser,
                                             bool AllowFold = true);
  /// Overload diagnosing with a fixed diagnostic ID instead of a custom
  /// VerifyICEDiagnoser.
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             unsigned DiagID,
                                             bool AllowFold = true);
  /// Overload using the default diagnostics.
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             llvm::APSInt *Result = nullptr);
  /// VerifyBitField - verifies that a bit field expression is an ICE and has
  /// the correct width, and that the field type is valid.
  /// The returned ExprResult is invalid on failure.
  /// Can optionally return whether the bit-field is of width 0
  ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                            QualType FieldTy, bool IsMsStruct,
                            Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
  // Depth of the force-CUDA-host-device pragma stack; see
  // PushForceCUDAHostDevice/PopForceCUDAHostDevice below.
  unsigned ForceCUDAHostDeviceDepth = 0;
public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();
  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();
  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
  llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
                 std::vector<PartialDiagnosticAt>>
      DeviceDeferredDiags;
  /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
  /// key in a hashtable, both the FD and location are hashed.
  struct FunctionDeclAndLoc {
    CanonicalDeclPtr<FunctionDecl> FD;
    SourceLocation Loc;
  };
  /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
  /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
  /// same deferred diag twice.
  llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
  /// An inverse call graph, mapping known-emitted functions to one of their
  /// known-emitted callers (plus the location of the call).
  ///
  /// Functions that we can tell a priori must be emitted aren't added to this
  /// map.
  llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Caller = */ FunctionDeclAndLoc>
      DeviceKnownEmittedFns;
  /// A partial call graph maintained during CUDA/OpenMP device code compilation
  /// to support deferred diagnostics.
  ///
  /// Functions are only added here if, at the time they're considered, they are
  /// not known-emitted. As soon as we discover that a function is
  /// known-emitted, we remove it and everything it transitively calls from this
  /// set and add those functions to DeviceKnownEmittedFns.
  llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
                                                 SourceLocation>>
      DeviceCallGraph;
  /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
  /// deferred.
  ///
  /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
  /// which are not allowed to appear inside __device__ functions and are
  /// allowed to appear in __host__ __device__ functions only if the host+device
  /// function is never codegen'ed.
  ///
  /// To handle this, we use the notion of "deferred diagnostics", where we
  /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
  ///
  /// This class lets you emit either a regular diagnostic, a deferred
  /// diagnostic, or no diagnostic at all, according to an argument you pass to
  /// its constructor, thus simplifying the process of creating these "maybe
  /// deferred" diagnostics.
  class DeviceDiagBuilder {
  public:
    enum Kind {
      /// Emit no diagnostics.
      K_Nop,
      /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
      K_Immediate,
      /// Emit the diagnostic immediately, and, if it's a warning or error, also
      /// emit a call stack showing how this function can be reached by an a
      /// priori known-emitted function.
      K_ImmediateWithCallStack,
      /// Create a deferred diagnostic, which is emitted only if the function
      /// it's attached to is codegen'ed. Also emit a call stack as with
      /// K_ImmediateWithCallStack.
      K_Deferred
    };
    DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                      FunctionDecl *Fn, Sema &S);
    DeviceDiagBuilder(DeviceDiagBuilder &&D);
    DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
    ~DeviceDiagBuilder();
    /// Convertible to bool: True if we immediately emitted an error, false if
    /// we didn't emit an error or we created a deferred error.
    ///
    /// Example usage:
    ///
    ///   if (DeviceDiagBuilder(...) << foo << bar)
    ///     return ExprError();
    ///
    /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
    /// want to use these instead of creating a DeviceDiagBuilder yourself.
    operator bool() const { return ImmediateDiag.hasValue(); }
    /// Stream \p Value into whichever diagnostic is active: the immediate
    /// diagnostic if one exists, otherwise the deferred partial diagnostic
    /// recorded in Sema::DeviceDeferredDiags for \p Fn.
    template <typename T>
    friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                               const T &Value) {
      if (Diag.ImmediateDiag.hasValue())
        *Diag.ImmediateDiag << Value;
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
            << Value;
      return Diag;
    }
  private:
    Sema &S;                // Sema instance; used to reach DeviceDeferredDiags.
    SourceLocation Loc;     // Location the diagnostic points at.
    unsigned DiagID;        // Diagnostic ID to emit.
    FunctionDecl *Fn;       // Function a deferred diagnostic is attached to.
    bool ShowCallStack;     // Whether a call stack accompanies the diagnostic.
    // Invariant: At most one of these Optionals has a value.
    // FIXME: Switch these to a Variant once that exists.
    llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
    llvm::Optional<unsigned> PartialDiagId;
  };
  /// Indicate that this function (and thus everything it transitively calls)
  /// will be codegen'ed, and emit any deferred diagnostics on this function and
  /// its (transitive) callees.
  void markKnownEmitted(
      Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
      SourceLocation OrigLoc,
      const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
  /// is "used as device code".
  ///
  /// - If CurContext is a __host__ function, does not emit any diagnostics.
  /// - If CurContext is a __device__ or __global__ function, emits the
  ///   diagnostics immediately.
  /// - If CurContext is a __host__ __device__ function and we are compiling for
  ///   the device, creates a diagnostic which is emitted if and when we realize
  ///   that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in CUDA device code.
  ///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
  /// is "used as host code".
  ///
  /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
  DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  /// function is emitted for the device, emits the diagnostics immediately.
  /// - If CurContext is a non-`declare target` function and we are compiling
  ///   for the device, creates a diagnostic which is emitted if and when we
  ///   realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  /// function is emitted for the host, emits the diagnostics immediately.
  /// - If CurContext is a non-host function, just ignore it.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
  DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
  enum CUDAFunctionTarget {
    CFT_Device,        ///< __device__ function.
    CFT_Global,        ///< __global__ (kernel) function.
    CFT_Host,          ///< __host__ function.
    CFT_HostDevice,    ///< __host__ __device__ function.
    CFT_InvalidTarget  ///< Invalid combination of target attributes.
  };
  /// Determines whether the given function is a CUDA device/host/kernel/etc.
  /// function.
  ///
  /// Use this rather than examining the function's attributes yourself -- you
  /// will get it wrong. Returns CFT_Host if D is null.
  CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                        bool IgnoreImplicitHDAttr = false);
  CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
  /// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
  // CUDA function call preference. Must be ordered numerically from
  // worst to best (callers compare the enumerators numerically).
  enum CUDAFunctionPreference {
    CFP_Never,      // Invalid caller/callee combination.
    CFP_WrongSide,  // Calls from host-device to host or device
                    // function that do not match current compilation
                    // mode.
    CFP_HostDevice, // Any calls to host/device functions.
    CFP_SameSide,   // Calls from host-device to host or device
                    // function matching current compilation mode.
    CFP_Native,     // host-to-host or device-to-device calls.
  };
  /// Identifies relative preference of a given Caller/Callee
  /// combination, based on their host/device attributes.
  /// \param Caller function which needs address of \p Callee.
  ///               nullptr in case of global context.
  /// \param Callee target function
  ///
  /// \returns preference value for particular Caller/Callee combination.
  CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee);
  /// Determines whether Caller may invoke Callee, based on their CUDA
  /// host/device attributes. Returns false if the call is not allowed.
  ///
  /// Note: Will return true for CFP_WrongSide calls. These may appear in
  /// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
  /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
  /// depending on FD and the current compilation settings.
  void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                   const LookupResult &Previous);
public:
  /// Check whether we're allowed to call Callee from the current context.
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   (CFP_Never), emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
  ///   be emitted if and when the caller is codegen'ed, and returns true.
  ///
  ///   Will only create deferred diagnostics for a given SourceLocation once,
  ///   so you can safely call this multiple times without generating duplicate
  ///   deferred errors.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
  bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
  /// Set __device__ or __host__ __device__ attributes on the given lambda
  /// operator() method.
  ///
  /// CUDA lambdas declared inside __device__ or __global__ functions inherit
  /// the __device__ attribute. Similarly, lambdas inside __host__ __device__
  /// functions become __host__ __device__ themselves.
  void CUDASetLambdaAttrs(CXXMethodDecl *Method);
  /// Finds a function in \p Matches with highest calling priority
  /// from \p Caller context and erases all functions with lower
  /// calling priority.
  void EraseUnwantedCUDAMatches(
      const FunctionDecl *Caller,
      SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
  /// Given an implicit special member, infer its CUDA target from the
  /// calls it needs to make to underlying base/field special members.
  /// \param ClassDecl the class for which the member is being created.
  /// \param CSM the kind of special member.
  /// \param MemberDecl the special member itself.
  /// \param ConstRHS true if this is a copy operation with a const object on
  ///        its RHS.
  /// \param Diagnose true if this call should emit diagnostics.
  /// \return true if there was an error inferring.
  /// The result of this call is implicit CUDA target attribute(s) attached to
  /// the member declaration.
  bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                               CXXSpecialMember CSM,
                                               CXXMethodDecl *MemberDecl,
                                               bool ConstRHS,
                                               bool Diagnose);
  /// \return true if \p CD can be considered empty according to CUDA
  /// (E.2.3.1 in CUDA 7.5 Programming guide).
  bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
  bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
  // Checks that initializers of \p Var satisfy CUDA restrictions. In
  // case of error emits appropriate diagnostic and invalidates \p Var.
  //
  // \details CUDA allows only empty constructors as initializers for global
  // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
  // __shared__ variables whether they are local or not (they all are implicitly
  // static in CUDA). One exception is that CUDA allows constant initializers
  // for __constant__ and __device__ variables.
  void checkAllowedCUDAInitializer(VarDecl *VD);
  /// Check whether NewFD is a valid overload for CUDA. Emits
  /// diagnostics and invalidates NewFD if not.
  void checkCUDATargetOverload(FunctionDecl *NewFD,
                               const LookupResult &Previous);
  /// Copies target attributes from the template TD to the function FD.
  void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
  /// Returns the name of the launch configuration function. This is the name
  /// of the function that will be called to configure kernel call, with the
  /// parameters specified via <<<>>>.
  std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
  // Code-completion entry points, one per grammatical position at which the
  // parser can request completions.
  void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
  void CodeCompleteOrdinaryName(Scope *S,
                                ParserCompletionContext CompletionContext);
  void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
                            bool AllowNonIdentifiers,
                            bool AllowNestedNameSpecifiers);
  struct CodeCompleteExpressionData;
  void CodeCompleteExpression(Scope *S,
                              const CodeCompleteExpressionData &Data);
  void CodeCompleteExpression(Scope *S, QualType PreferredType,
                              bool IsParenthesized = false);
  void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
                                       SourceLocation OpLoc, bool IsArrow,
                                       bool IsBaseExprStatement,
                                       QualType PreferredType);
  void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
                                     QualType PreferredType);
  void CodeCompleteTag(Scope *S, unsigned TagSpec);
  void CodeCompleteTypeQualifiers(DeclSpec &DS);
  void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
                                      const VirtSpecifiers *VS = nullptr);
  void CodeCompleteBracketDeclarator(Scope *S);
  void CodeCompleteCase(Scope *S);
  /// Reports signatures for a call to CodeCompleteConsumer and returns the
  /// preferred type for the current argument. Returned type can be null.
  QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
                                    SourceLocation OpenParLoc);
  QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
                                           SourceLocation Loc,
                                           ArrayRef<Expr *> Args,
                                           SourceLocation OpenParLoc);
  QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
                                              CXXScopeSpec SS,
                                              ParsedType TemplateTypeTy,
                                              ArrayRef<Expr *> ArgExprs,
                                              IdentifierInfo *II,
                                              SourceLocation OpenParLoc);
  void CodeCompleteInitializer(Scope *S, Decl *D);
  void CodeCompleteAfterIf(Scope *S);
  void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
                               bool IsUsingDeclaration, QualType BaseType,
                               QualType PreferredType);
  void CodeCompleteUsing(Scope *S);
  void CodeCompleteUsingDirective(Scope *S);
  void CodeCompleteNamespaceDecl(Scope *S);
  void CodeCompleteNamespaceAliasDecl(Scope *S);
  void CodeCompleteOperatorName(Scope *S);
  void CodeCompleteConstructorInitializer(
      Decl *Constructor,
      ArrayRef<CXXCtorInitializer *> Initializers);
  void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
                                    bool AfterAmpersand);
  // Objective-C code completion.
  void CodeCompleteObjCAtDirective(Scope *S);
  void CodeCompleteObjCAtVisibility(Scope *S);
  void CodeCompleteObjCAtStatement(Scope *S);
  void CodeCompleteObjCAtExpression(Scope *S);
  void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
  void CodeCompleteObjCPropertyGetter(Scope *S);
  void CodeCompleteObjCPropertySetter(Scope *S);
  void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
                                   bool IsParameter);
  void CodeCompleteObjCMessageReceiver(Scope *S);
  void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
                                    ArrayRef<IdentifierInfo *> SelIdents,
                                    bool AtArgumentExpression);
  void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
                                    ArrayRef<IdentifierInfo *> SelIdents,
                                    bool AtArgumentExpression,
                                    bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
  /// The type and pointer-constraints registered for a type-tag magic
  /// value via RegisterTypeTagForDatatype.
  struct TypeTagData {
    TypeTagData() {}

    TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
        Type(Type), LayoutCompatible(LayoutCompatible),
        MustBeNull(MustBeNull)
    {}

    /// The type associated with this magic value.
    QualType Type;

    /// If true, \c Type should be compared with other expression's types for
    /// layout-compatibility.
    unsigned LayoutCompatible : 1;

    /// If true, the argument paired with this tag is expected to be a null
    /// pointer (mirrors the MustBeNull flag of RegisterTypeTagForDatatype).
    unsigned MustBeNull : 1;
  };
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
  /// RAII object that stashes Sema's pending delayed-parse worklists
  /// (overriding/equivalent exception-spec checks and delayed dllexport
  /// classes) on construction and restores them on destruction, asserting
  /// that no new pending work accumulated in between.
  class SavePendingParsedClassStateRAII {
  public:
    SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

    ~SavePendingParsedClassStateRAII() {
      assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedDllExportClasses.empty() &&
             "there shouldn't be any pending delayed DLL export classes");
      swapSavedState();
    }

  private:
    Sema &S;

    // Saved snapshots of the Sema-owned worklists; exchanged by swap (not
    // copied) in swapSavedState().
    // NOTE(review): DelayedDllExportMemberFunctions is NOT saved/restored
    // here — confirm that is intentional.
    decltype(DelayedOverridingExceptionSpecChecks)
        SavedOverridingExceptionSpecChecks;
    decltype(DelayedEquivalentExceptionSpecChecks)
        SavedEquivalentExceptionSpecChecks;
    decltype(DelayedDllExportClasses) SavedDllExportClasses;

    /// Exchange the saved state with Sema's live state.
    void swapSavedState() {
      SavedOverridingExceptionSpecChecks.swap(
          S.DelayedOverridingExceptionSpecChecks);
      SavedEquivalentExceptionSpecChecks.swap(
          S.DelayedEquivalentExceptionSpecChecks);
      SavedDllExportClasses.swap(S.DelayedDllExportClasses);
    }
  };
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the sef of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Whether a context was actually pushed (and so must be popped in the
  // destructor).
  bool Entered = true;

public:
  /// Push \p NewContext onto Sema's evaluation-context stack, unless
  /// \p ShouldEnter is false.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  /// Push \p NewContext, reusing the enclosing lambda's context
  /// declaration (tag-dispatched via Sema::ReuseLambdaContextDecl_t).
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };

  /// Enter a context for a braced-init-list; only pushes UnevaluatedList
  /// when currently inside an unevaluated operand in C++11 or later.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  /// The cached token stream for the function body, replayed when the
  /// template is finally parsed at the end of the translation unit.
  CachedTokens Toks;

  /// The template function declaration to be late parsed.
  Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  // Delegate empty/tombstone key generation to the canonical-decl-pointer
  // DenseMapInfo; the paired SourceLocation is default-constructed
  // (invalid) in both sentinel keys.
  using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  /// Combine the declaration's hash with the location's raw encoding.
  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  /// Keys are equal only when both the declaration and the location match.
  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm
#endif
|
main.c | #include <stdlib.h>
#include <stdio.h>
#include <getopt.h>
#include <time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
int main (int argc, char** argv)
{
    // 3-D heat-equation benchmark: iterates a 7-point stencil over a
    // height x width x depth grid (optionally parallelized with OpenMP)
    // and reports runtime, throughput (GFLOPS) and minimal bandwidth.
    int i, j, k, t, height = 360, width = 360, depth = 360, num_iterations = 1000, opt;
    float alpha = 0.1;
#ifndef _OPENMP
    clock_t before, after;      // serial build: CPU time via clock()
#else
    double before, after;       // OpenMP build: wall time via omp_get_wtime()
#endif
    double time_used;

    // Parse command-line options.
    while ((opt = getopt(argc, argv, "h:w:d:t:a:")) != -1) {
        switch (opt) {
        case 'h':
            height = atoi(optarg);
            break;
        case 'w':
            width = atoi(optarg);
            break;
        case 'd':
            depth = atoi(optarg);
            break;
        case 't':
            num_iterations = atoi(optarg);
            break;
        case 'a':
            alpha = atof(optarg);
            break;
        default:
            fprintf(stderr, "Usage: %s [-h height] [-w width] [-d depth] [-t no. iterations] [-a alpha value for heat eq.]\n", argv[0]);
            exit(EXIT_FAILURE);
        }
    }

    // beta reduces the stencil operation to only require 6 flops (instead of 7)
    float beta = (1 - 6*alpha);

    // Allocate the two grids (a: current step, b: next step).
    // Fixed: the original never checked malloc results; a failed
    // allocation would crash later with a null dereference.
    float ***tmp; // temporary pointer to perform pointer swaps
    float ***a = (float***) malloc(height*sizeof(float**));
    float ***b = (float***) malloc(height*sizeof(float**));
    if (a == NULL || b == NULL) {
        fprintf(stderr, "%s: out of memory\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    for (i = 0; i < height; ++i) {
        a[i] = (float**) malloc(width*sizeof(float*));
        b[i] = (float**) malloc(width*sizeof(float*));
        if (a[i] == NULL || b[i] == NULL) {
            fprintf(stderr, "%s: out of memory\n", argv[0]);
            exit(EXIT_FAILURE);
        }
        for (j = 0; j < width; ++j) {
            a[i][j] = (float*) malloc(depth*sizeof(float));
            b[i][j] = (float*) malloc(depth*sizeof(float));
            if (a[i][j] == NULL || b[i][j] == NULL) {
                fprintf(stderr, "%s: out of memory\n", argv[0]);
                exit(EXIT_FAILURE);
            }
        }
    }

    // Fill both grids with identical pseudo-random values.
    // BUG FIX: the original used rand() inside an OpenMP parallel region
    // (rand() is not required to be thread-safe) and privatized only j,
    // leaving k shared — a data race between threads. Use the reentrant
    // rand_r() with a deterministic per-row seed and make k private.
#pragma omp parallel for private(j, k)
    for (i = 0; i < height; ++i) {
        unsigned int seed = 1u + (unsigned int) i;  // per-row seed: race-free and reproducible
        for (j = 0; j < width; ++j) {
            for (k = 0; k < depth; ++k) {
                a[i][j][k] = (float) rand_r(&seed) / (float) (RAND_MAX);
                b[i][j][k] = a[i][j][k];
            }
        }
    }

    // Start timer
#ifndef _OPENMP
    before = clock();
#else
    before = omp_get_wtime();
#endif

    // Perform computations
#pragma omp parallel private(t,i,j,k)
    {
#ifdef _OPENMP
#pragma omp single
        {
            printf("Using %d OpenMP threads to parallelize heat equation\n", omp_get_num_threads());
            fflush(NULL);
        }
#endif
        // Perform heat equation. The implicit barriers at the end of the
        // `for` and `single` constructs keep the pointer swap race-free.
        for (t = 0; t < num_iterations; ++t) {
#pragma omp for
            for (i = 1; i < height - 1; ++i)
                for (j = 1; j < width - 1; ++j)
                    for (k = 1; k < depth - 1; ++k)
                        b[i][j][k] = beta*a[i][j][k] + alpha*(
                            a[i+1][j][k] + a[i-1][j][k] + a[i][j+1][k] + a[i][j-1][k] + a[i][j][k+1] + a[i][j][k-1]);
#pragma omp single
            {
                // pointer swap: b (just written) becomes the new source grid
                tmp = b;
                b = a;
                a = tmp;
            }
        }
    }

    // End timer and evaluate time used
#ifndef _OPENMP
    after = clock();
    // double arithmetic avoids needless float rounding of the elapsed time
    time_used = (double) (after - before) / (double) CLOCKS_PER_SEC;
#else
    after = omp_get_wtime();
    time_used = after - before;
#endif

    // deallocate matrices
    for (i = 0; i < height; ++i) {
        for (j = 0; j < width; ++j) {
            free(a[i][j]);
            free(b[i][j]);
        }
        free(a[i]);
        free(b[i]);
    }
    free(a);
    free(b);

    // Report parameters and results.
    // 8 flops per interior point: 1 mul (beta), 1 mul (alpha), 6 adds.
    float base = 1e-9*(float)num_iterations/time_used;
    float gflops = base*(float)(height-2)*(float)(width-2)*(float)(depth-2)*8.0;
    // Minimal traffic: one read and one write of every grid cell per step.
    float bandwidth = base*sizeof(float)*(float)height*(float)width*(float)depth*2.0;
    printf("3D Grid           : %d x %d x %d\n", height, width, depth);
    printf("Iterations        : %d\n", num_iterations);
    printf("alpha             : %g\n", alpha);
    printf("Time              : %f s\n", time_used);
    printf("Throughput        : %f GFLOPS\n", gflops);
    printf("Minimal Bandwidth : %f GB/s\n", bandwidth);

    return EXIT_SUCCESS;
}
trsm_x_sky_u_hi_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Triangular solve with a unit upper-triangular skyline (SKY) matrix,
 * column-major multi-RHS: for each right-hand-side column, perform a
 * backward substitution y := alpha*x - U_hi * y using the skyline
 * column pointers of A. Columns of x/y are processed independently,
 * so the outer loop is parallelized over right-hand sides. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    int thread_count = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_count)
#endif
    for (ALPHA_INT rhs = 0; rhs < columns; rhs++)
    {
        /* Backward substitution: rows are resolved from bottom to top. */
        for (ALPHA_INT row = A->cols - 1; row >= 0; row--)
        {
            ALPHA_Number acc;
            alpha_setzero(acc);

            /* Accumulate contributions of already-solved entries below. */
            for (ALPHA_INT col = A->cols - 1; col > row; col--)
            {
                ALPHA_INT first = A->pointers[col];
                ALPHA_INT last = A->pointers[col + 1];
                ALPHA_INT offset = col - row;
                /* The skyline column stores only entries down to its
                 * profile start; skip values that fall outside it. */
                if (last - offset - 1 >= first)
                    alpha_madde(acc, A->values[last - offset - 1], y[rhs * ldy + col]);
            }

            /* y[row] = alpha * x[row] - accumulated off-diagonal part
             * (the diagonal is implicitly one). */
            ALPHA_Number scaled;
            alpha_mul(scaled, alpha, x[rhs * ldx + row]);
            alpha_sub(y[rhs * ldy + row], scaled, acc);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
zgbtrs.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gbtrs
*
* Solves a system of linear equations A * X = B with triangular factorization
* computed by plasma_zpbtrf or plasma_zgbtrf.
*
*******************************************************************************
*
* @param[in] trans
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in] kl
* The number of subdiagonals within the band of A. kl >= 0.
*
* @param[in] ku
* The number of superdiagonals within the band of A. ku >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of
* columns of the matrix B. nrhs >= 0.
*
* @param[in,out] AB
* Details of the LU factorization of the band matrix A, as
* computed by plasma_zgbtrf.
*
* @param[in] ldab
* The leading dimension of the array AB.
*
* @param[in] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_zgbtrs
* @sa plasma_cgbtrs
* @sa plasma_dgbtrs
* @sa plasma_sgbtrs
* @sa plasma_zpbtrf
*
******************************************************************************/
int plasma_zgbtrs(plasma_enum_t trans, int n, int kl, int ku, int nrhs,
                  plasma_complex64_t *pAB, int ldab,
                  int *ipiv,
                  plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaTrans) &&
        (trans != PlasmaConjTrans)) {
        plasma_error("illegal value of trans");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kl < 0) {
        // Fixed: message previously said "kd", which is not a parameter
        // of this routine.
        plasma_error("illegal value of kl");
        return -3;
    }
    if (ku < 0) {
        plasma_error("illegal value of ku");
        return -4;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -5;
    }
    if (ldab < imax(1, 1+kl+ku)) {
        plasma_error("illegal value of ldab");
        return -7;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -10;
    }

    // quick return
    if (imax(n, nrhs) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize tile matrix descriptors.
    plasma_desc_t AB;
    plasma_desc_t B;
    int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
    int tkl = (kl+nb-1)/nb;    // number of tiles in lower band (not including diagonal)
    int lm = (tku+tkl+1)*nb;   // since we use zgetrf on panel, we pivot back within panel.
                               // this could fill the last tile of the panel,
                               // and we need extra NB space on the bottom
    int retval;
    retval = plasma_desc_general_band_create(PlasmaComplexDouble, PlasmaGeneral,
                                             nb, nb, lm, n, 0, 0, n, n, kl, ku,
                                             &AB);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // BUG FIX: release both tile descriptors created above; the
        // original returned here and leaked AB and B.
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zpb2desc(pAB, ldab, AB, sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, sequence, &request);

        // Call the tile async function.
        plasma_omp_zgbtrs(trans, AB, ipiv, B, sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(B, pB, ldb, sequence, &request);
    }
    // implicit synchronization

    // Free matrix A in tile layout.
    plasma_desc_destroy(&AB);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}
/***************************************************************************//**
 *
 * @ingroup plasma_gbtrs
 *
 * Solves a system of linear equations with a general band matrix using the
 * LU factorization previously computed by plasma_omp_zgbtrf.
 * Non-blocking tile version of plasma_zgbtrs().
 * May return before the computation is finished.
 * Operates on matrices stored by tiles.
 * All matrices are passed through descriptors.
 * All dimensions are taken from the descriptors.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] trans
 *          - PlasmaNoTrans:   A is not transposed,
 *          - PlasmaTrans:     A is transposed,
 *          - PlasmaConjTrans: A is conjugate transposed.
 *
 * @param[in] AB
 *          The band LU factors L and U computed by plasma_omp_zgbtrf.
 *
 * @param[in] ipiv
 *          The pivot indices computed by plasma_omp_zgbtrf.
 *
 * @param[in,out] B
 *          On entry, the n-by-nrhs right hand side matrix B.
 *          On exit, if return value = 0, the n-by-nrhs solution matrix X.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes). Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zgbtrs
 * @sa plasma_omp_zgbtrs
 * @sa plasma_omp_cgbtrs
 * @sa plasma_omp_dgbtrs
 * @sa plasma_omp_sgbtrs
 * @sa plasma_omp_zgbtrf
 *
 ******************************************************************************/
void plasma_omp_zgbtrs(plasma_enum_t trans, plasma_desc_t AB, int *ipiv, plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Check sequence and request before anything else: plasma_request_fail()
    // dereferences both, so every other error path below relies on them
    // being non-NULL.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaTrans) &&
        (trans != PlasmaConjTrans)) {
        plasma_error("illegal value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (AB.n == 0 || B.n == 0)
        return;
    // Call the parallel functions: two triangular band solves.
    // NoTrans applies L then U; (conj-)transposed systems apply them in the
    // reverse order with U^T/U^H first.
    if (trans == PlasmaNoTrans) {
        plasma_pztbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans,
                      PlasmaUnit,
                      1.0, AB,
                           B,
                      ipiv,
                      sequence, request);
        plasma_pztbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans,
                      PlasmaNonUnit,
                      1.0, AB,
                           B,
                      ipiv,
                      sequence, request);
    }
    else {
        plasma_pztbsm(PlasmaLeft, PlasmaUpper, trans,
                      PlasmaNonUnit,
                      1.0, AB,
                           B,
                      ipiv,
                      sequence, request);
        plasma_pztbsm(PlasmaLeft, PlasmaLower, trans,
                      PlasmaUnit,
                      1.0, AB,
                           B,
                      ipiv,
                      sequence, request);
    }
}
|
blur.c | #include "blur.h"
#include <stdlib.h>
#include <string.h>
/* Horizontal pass of a box blur over an interleaved RGB image.
 * Each output pixel is the mean of the 2*radius+1 row neighbors; the window
 * is simply truncated at the borders (edge pixels therefore come out darker).
 * A sliding-window accumulator gives one add + one subtract per pixel. */
static void box_blur_h(unsigned char *dest, unsigned char *src,
                       int height, int width, int radius) {
    double weight = 1.0 / (2 * radius + 1);
#pragma omp parallel for
    for (int row = 0; row < height; ++row) {
        int row_off = row * width;
        double r_sum = 0.0, g_sum = 0.0, b_sum = 0.0;
        /* Start at -radius so the window is already primed when col hits 0. */
        for (int col = -radius; col < width; ++col) {
            int leaving = col - radius - 1;  /* pixel sliding out of the window */
            int entering = col + radius;     /* pixel sliding into the window */
            if (leaving >= 0) {
                unsigned char *p = src + (row_off + leaving) * 3;
                r_sum -= weight * p[0];
                g_sum -= weight * p[1];
                b_sum -= weight * p[2];
            }
            if (entering < width) {
                unsigned char *p = src + (row_off + entering) * 3;
                r_sum += weight * p[0];
                g_sum += weight * p[1];
                b_sum += weight * p[2];
            }
            if (col >= 0) {
                unsigned char *q = dest + (row_off + col) * 3;
                q[0] = r_sum + 0.5;  /* +0.5 rounds to nearest on truncation */
                q[1] = g_sum + 0.5;
                q[2] = b_sum + 0.5;
            }
        }
    }
}
/* Vertical pass of a box blur over an interleaved RGB image.
 * Mirror of box_blur_h: averages the 2*radius+1 column neighbors with a
 * sliding-window accumulator, truncating the window at the borders. */
static void box_blur_v(unsigned char *dest, unsigned char *src,
                       int height, int width,
                       int radius) {
    double weight = 1.0 / (2 * radius + 1);
#pragma omp parallel for
    for (int col = 0; col < width; ++col) {
        double r_sum = 0.0, g_sum = 0.0, b_sum = 0.0;
        /* Start at -radius so the window is already primed when row hits 0. */
        for (int row = -radius; row < height; ++row) {
            int leaving = row - radius - 1;  /* pixel sliding out of the window */
            int entering = row + radius;     /* pixel sliding into the window */
            if (leaving >= 0) {
                unsigned char *p = src + (leaving * width + col) * 3;
                r_sum -= weight * p[0];
                g_sum -= weight * p[1];
                b_sum -= weight * p[2];
            }
            if (entering < height) {
                unsigned char *p = src + (entering * width + col) * 3;
                r_sum += weight * p[0];
                g_sum += weight * p[1];
                b_sum += weight * p[2];
            }
            if (row >= 0) {
                unsigned char *q = dest + (row * width + col) * 3;
                q[0] = r_sum + 0.5;  /* +0.5 rounds to nearest on truncation */
                q[1] = g_sum + 0.5;
                q[2] = b_sum + 0.5;
            }
        }
    }
}
/* One complete box blur: horizontal pass into a scratch buffer, then a
 * vertical pass into dest (a box kernel is separable).
 * Fixes: the malloc result was previously used unchecked, and the size
 * product could overflow int for large images. On allocation failure dest
 * is left untouched. */
static void box_blur_once(unsigned char *dest, unsigned char *src,
                          int height, int width, int radius) {
    unsigned char *tmp = (unsigned char *)malloc((size_t)height * (size_t)width * 3);
    if (tmp == NULL)
        return;
    box_blur_h(tmp, src, height, width, radius);
    box_blur_v(dest, tmp, height, width, radius);
    free(tmp);
}
/* Apply `times` successive box blurs of the given radius to the screenshot.
 * The first pass reads s.data into dest; each further pass copies the
 * previous result back into s.data (which is clobbered) and blurs again. */
void box_blur(unsigned char *dest, Screenshot s, int radius, int times) {
    box_blur_once(dest, s.data, s.height, s.width, radius);
    while (--times > 0) {
        memcpy(s.data, dest, s.height * s.width * 3);
        box_blur_once(dest, s.data, s.height, s.width, radius);
    }
}
/* Pixelate: replace every (2*radius+1)-sided square tile of the image with
 * that tile's average color. Tiles at the right/bottom edges are clipped to
 * the image bounds and averaged over the pixels they actually cover. */
void pixelate(unsigned char *dest, Screenshot s, int radius) {
    int side = radius * 2 + 1;
#pragma omp parallel for
    for (int ty = 0; ty < s.height; ty += side) {
        for (int tx = 0; tx < s.width; tx += side) {
            /* Clip this tile to the image. */
            int h = side, w = side;
            if (ty + h > s.height) h = s.height - ty;
            if (tx + w > s.width) w = s.width - tx;
            /* Accumulate per-channel sums over the tile. */
            int r_sum = 0, g_sum = 0, b_sum = 0;
            for (int y = ty; y < ty + h; ++y) {
                for (int x = tx; x < tx + w; ++x) {
                    unsigned char *p = s.data + (y * s.width + x) * 3;
                    r_sum += p[0];
                    g_sum += p[1];
                    b_sum += p[2];
                }
            }
            int count = w * h;
            int r_avg = r_sum / count;
            int g_avg = g_sum / count;
            int b_avg = b_sum / count;
            /* Flood the tile with its average color. */
            for (int y = ty; y < ty + h; ++y) {
                for (int x = tx; x < tx + w; ++x) {
                    unsigned char *q = dest + (y * s.width + x) * 3;
                    q[0] = r_avg;
                    q[1] = g_avg;
                    q[2] = b_avg;
                }
            }
        }
    }
}
|
cv_onlinemil.h | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef MTF_ONLINE_MIL_H
#define MTF_ONLINE_MIL_H
#undef min
#undef max
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <limits>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
namespace mtf
{
namespace mil
{
typedef unsigned int uint;
typedef std::vector<float> vectorf;
typedef std::vector<int> vectori;
typedef std::vector<bool> vectorb;
// Three-way sign: +1 for positive, -1 for negative, 0 for zero.
template<typename ScalarT>int sign(ScalarT s){
  if (s > 0)
    return 1;
  if (s < 0)
    return -1;
  return 0;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
// random generator stuff
// Thin static wrapper around one shared cv::RNG instance, used as the
// library-wide pseudo-random number source.
struct RandomGenerator
{
public:
  // Re-seed the shared generator.
  static
  void
  initialize(const int init)
  {
    rng_ = cv::RNG(init);
  }
  // Uniform integer; cv::RNG::uniform draws from [min, max).
  static
  int
  randint(const int min, const int max)
  {
    return rng_.uniform(min, max);
  }
  // Uniform float in [min, max).
  static
  float
  randfloat(const float min = 0, const float max = 1)
  {
    return rng_.uniform(min, max);
  }
  // Shared generator state; must be defined in exactly one source file.
  static cv::RNG rng_;
};
// Logistic function: maps any real x into (0, 1); sigmoid(0) == 0.5.
inline float
sigmoid(float x)
{
  const float denom = 1.0f + exp(-x);
  return 1.0f / denom;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
// vector functions
// Value/index pair whose operator< is intentionally inverted (_val > b._val)
// so that std::sort over these elements yields DESCENDING order by value.
// Improvements: operator< is now const-correct (takes const&, is const) and
// the default constructor value-initializes _val instead of leaving it
// indeterminate.
template<class T> class SortableElement
{
public:
  T _val;
  int _ind;
  SortableElement()
      :
      _val(),
      _ind(0)
  {
  }
  SortableElement(T val, int ind)
  {
    _val = val;
    _ind = ind;
  }
  bool
  operator<(const SortableElement &b) const
  {
    return (_val > b._val);
  }
  ;
};
// Value/index pair with the natural (ascending) operator<; the "Rev"
// counterpart of SortableElement, whose comparison is inverted.
// Improvements: operator< is now const-correct (takes const&, is const) and
// the default constructor value-initializes _val instead of leaving it
// indeterminate.
template<class T> class SortableElementRev
{
public:
  T _val;
  int _ind;
  SortableElementRev()
      :
      _val(),
      _ind(0)
  {
  }
  SortableElementRev(T val, int ind)
  {
    _val = val;
    _ind = ind;
  }
  bool
  operator<(const SortableElementRev<T> &b) const
  {
    return (_val < b._val);
  }
  ;
};
// Ascending-by-value comparator for std::sort over SortableElementRev<float>.
static bool
CompareSortableElementRev(const SortableElementRev<float>& i, const SortableElementRev<float>& j)
{
  return i._val < j._val;
}
// Sort v in place and fill `order` with each sorted element's original
// index (order[i] = position the new i-th value came from).
// NOTE(review): despite the "_des" (descending) name, the comparator used
// here (CompareSortableElementRev) sorts ASCENDING by value; the commented
// std::sort below (via SortableElementRev::operator<) is also ascending.
// Confirm which order callers actually expect before relying on the name.
template<class T> void
sort_order_des(std::vector<T> &v, vectori &order)
{
  uint n = (uint) v.size();
  std::vector<SortableElementRev<T> > v2;
  v2.resize(n);
  order.clear();
  order.resize(n);
  // Pair every value with its original index.
  for (uint i = 0; i < n; i++)
  {
    v2[i]._ind = i;
    v2[i]._val = v[i];
  }
  //std::sort( v2.begin(), v2.end() );
  std::sort(v2.begin(), v2.end(), CompareSortableElementRev);
  // Unpack sorted values and their source indices.
  for (uint i = 0; i < n; i++)
  {
    order[i] = v2[i]._ind;
    v[i] = v2[i]._val;
  }
}
;
// Resize a 2-D vector to sz1 rows of sz2 columns, filling any newly created
// cells with val (existing cells keep their values, as std::vector::resize does).
template<class T> void
resizeVec(std::vector<std::vector<T> > &v, int sz1, int sz2, T val = 0)
{
  v.resize(sz1);
  for (typename std::vector<std::vector<T> >::iterator row = v.begin(); row != v.end(); ++row)
    row->resize(sz2, val);
}
////template<class T> inline uint min_idx( const vector<T> &v )
////{
//// return (uint)(min_element(v.begin(),v.end())._Myptr-v.begin()._Myptr);
////}
// Index of the (first) maximum element of v.
// Precondition: v is non-empty — std::max_element on an empty range returns
// v.end(), which must not be dereferenced.
// Improvement: uses std::distance on iterators instead of subtracting raw
// pointers obtained through &* (which dereferenced end-adjacent iterators).
template<class T> inline uint
max_idx(const std::vector<T> &v)
{
  return (uint) std::distance(v.begin(), std::max_element(v.begin(), v.end()));
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
// error functions
// Print an error location (and optional message) to stderr and terminate
// the process.
// Fix: exits with EXIT_FAILURE instead of exit(0) — a zero status made the
// abort look like success to callers and scripts.
inline void
abortError(const int line, const char *file, const char *msg = NULL)
{
  if (msg == NULL)
    fprintf(stderr, "%s %d: ERROR\n", file, line);
  else
    fprintf(stderr, "%s %d: ERROR: %s\n", file, line, msg);
  exit(EXIT_FAILURE);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
// One image patch: the source image plus its per-channel integral images,
// the patch's top-left corner (row, col), its size, and a training weight.
class Sample
{
public:
  Sample(const cv::Mat & img, const std::vector<cv::Mat_<float> > & ii_imgs, int row, int col, int width = 0,
         int height = 0, float weight = 1.0);
  // Default: empty 0x0 sample at the origin with unit weight.
  Sample()
  {
    _row = _col = _height = _width = 0;
    _weight = 1.0f;
  }
  Sample&
  operator=(const Sample &a);
public:
  cv::Mat _img;                            // source image (cv::Mat assignment shares data)
  std::vector<cv::Mat_<float> > _ii_imgs;  // one integral image per channel
  int _row, _col, _width, _height;         // patch position and size in pixels
  float _weight;                           // importance weight for training
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// A collection of Samples together with their feature values; the feature
// table _ftrVals is indexed [feature][sample] and is filled lazily (see
// ftrsComputed()).
class SampleSet
{
public:
  SampleSet()
  {
  }
  ;
  // Start the set with a single sample.
  SampleSet(const Sample &s)
  {
    _samples.push_back(s);
  }
  ;
  int
  size() const
  {
    return _samples.size();
  }
  ;
  void
  push_back(const Sample &s)
  {
    _samples.push_back(s);
  }
  ;
  // Construct a Sample in place from an image and (x, y) position;
  // defined out of line below.
  void
  push_back(const cv::Mat & img, const std::vector<cv::Mat_<float> > & ii_img, int x, int y, int width = 0,
            int height = 0, float weight = 1.0f);
  void
  resize(int i)
  {
    _samples.resize(i);
  }
  ;
  // Allocate feature-value storage for i features (out of line below).
  void
  resizeFtrs(int i);
  // Mutable access to the value of feature `ftr` on sample `sample`.
  float &
  getFtrVal(int sample, int ftr)
  {
    return _ftrVals[ftr](sample);
  }
  ;
  float
  getFtrVal(int sample, int ftr) const
  {
    return _ftrVals[ftr](sample);
  }
  ;
  Sample &
  operator[](const int sample)
  {
    return _samples[sample];
  }
  ;
  // NOTE: const operator[] returns the Sample by value (a copy).
  Sample
  operator[](const int sample) const
  {
    return _samples[sample];
  }
  ;
  // Row vector of feature `ftr` evaluated on every sample.
  const cv::Mat_<float> &
  ftrVals(int ftr) const
  {
    return _ftrVals[ftr];
  }
  // True once the feature table has been allocated and filled for the
  // current samples.
  bool
  ftrsComputed() const
  {
    return !_ftrVals.empty() && !_samples.empty() && !_ftrVals[0].empty();
  }
  ;
  void
  clear()
  {
    _ftrVals.clear();
    _samples.clear();
  }
  ;
  // densely sample the image in a donut shaped region: will take points inside circle of radius inrad,
  // but outside of the circle of radius outrad. when outrad=0 (default), then just samples points inside a circle
  void
  sampleImage(const cv::Mat & img, const std::vector<cv::Mat_<float> > & ii_imgs, int x, int y, int w, int h,
              float inrad, float outrad = 0, int maxnum = 1000000);
  // Sample `num` patches of size w x h from the image (implementation
  // elsewhere; presumably uniform random placement — confirm).
  void
  sampleImage(const cv::Mat & img, const std::vector<cv::Mat_<float> > & ii_imgs, uint num, int w, int h);
private:
  std::vector<Sample> _samples;
  std::vector<cv::Mat_<float> > _ftrVals; // [ftr][sample]
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Assignment: copy every member.
// Bug fix: the original skipped _ii_imgs and _weight, leaving the target
// with stale integral images and the wrong training weight after assignment.
// (cv::Mat/cv::Mat_ assignment shares the underlying data, so this stays cheap.)
inline Sample&
Sample::operator=(const Sample &a)
{
  _img = a._img;
  _ii_imgs = a._ii_imgs;
  _row = a._row;
  _col = a._col;
  _width = a._width;
  _height = a._height;
  _weight = a._weight;
  return (*this);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Allocate the feature table for nftr features: one 1 x nsamp row matrix
// per feature. Values are left uninitialized; callers fill them via
// getFtrVal(). No-op on the matrices if there are no samples yet.
inline void
SampleSet::resizeFtrs(int nftr)
{
  _ftrVals.resize(nftr);
  int nsamp = _samples.size();
  if (nsamp > 0)
    for (int k = 0; k < nftr; k++)
      _ftrVals[k].create(1, nsamp);
}
// Build a Sample at image position (x, y) and append it.
// Note the argument swap: the Sample constructor takes (row, col) = (y, x).
inline void
SampleSet::push_back(const cv::Mat & img, const std::vector<cv::Mat_<float> > & ii_imgs, int x, int y, int width,
                     int height, float weight)
{
  Sample s(img, ii_imgs, y, x, width, height, weight);
  push_back(s);
}
class Ftr;
typedef std::vector<Ftr*> vecFtr;
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Abstract base for feature-generator parameters; carries the patch size
// that features are generated for.
class FtrParams
{
public:
  uint _width, _height;
public:
  // Integer tag identifying the concrete feature type (0 = Haar; see
  // HaarFtrParams::ftrType).
  virtual int
  ftrType()=0;
  virtual
  ~FtrParams()
  {
  }
};
// Parameters for generating random Haar-like features.
class HaarFtrParams: public FtrParams
{
public:
  HaarFtrParams();
  uint _maxNumRect, _minNumRect;  // allowed range for rectangles per feature
  int _useChannels[1024];         // channel indices eligible for generation
  int _numCh;                     // number of valid entries in _useChannels
public:
  virtual int
  ftrType()
  {
    return 0;
  }
  ;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Abstract base class for image features: generated once from FtrParams and
// then evaluated on arbitrary Samples.
class Ftr
{
public:
  uint _width, _height;
  virtual
  ~Ftr()
  {
  }
  // Evaluate this feature on one sample.
  virtual float
  compute(const Sample &sample) const =0;
  // Randomly (re)generate this feature from the given parameters.
  virtual void
  generate(FtrParams *params) = 0;
  // Optional visualization; the default returns an empty image.
  virtual cv::Mat
  toViz()
  {
    return cv::Mat();
  }
  // Optional online update from weighted samples; the default is a no-op
  // that reports "not updated".
  virtual bool
  update(const SampleSet &posx, const SampleSet &negx, const cv::Mat_<float> &posw, const cv::Mat_<float> &negw)
  {
    return false;
  }
  // Fill a SampleSet's feature table for all features / for one feature.
  static void
  compute(SampleSet &samples, const vecFtr &ftrs);
  static void
  compute(SampleSet &samples, Ftr *ftr, int ftrind);
  // Generate `num` features from params (caller owns the returned pointers;
  // release them with deleteFtrs).
  static vecFtr
  generate(FtrParams *params, uint num);
  static void
  deleteFtrs(vecFtr ftrs);
  static void
  toViz(vecFtr &ftrs, const char *dirname);
  virtual int
  ftrType()=0;
};
// Haar-like feature: a weighted sum of rectangle sums evaluated on one
// channel's integral image (see HaarFtr::compute below).
class HaarFtr: public Ftr
{
public:
  uint _channel;                 // index into Sample::_ii_imgs
  vectorf _weights;              // weight per rectangle
  std::vector<cv::Rect> _rects;  // rectangles relative to the patch origin
  vectorf _rsums;                // per-rectangle normalizers (unused in compute(); see its commented code)
  double _maxSum;
public:
  //HaarFtr( HaarFtrParams &params );
  HaarFtr();
  HaarFtr&
  operator=(const HaarFtr &a);
  // Feature response expected for a flat mid-gray patch (see implementation).
  float
  expectedValue() const;
  virtual float
  compute(const Sample &sample) const;
  virtual void
  generate(FtrParams *params);
  virtual cv::Mat
  toViz();
  virtual int
  ftrType()
  {
    return 0;
  }
  ;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Evaluate the Haar feature on a sample: shift each rectangle to the sample
// position and accumulate its weighted integral-image sum (the standard
// 4-corner lookup: II(y2,x2) + II(y1,x1) - II(y2,x1) - II(y1,x2)).
// Cleanup: removed the dead post-loop block that rebuilt `r` for a
// commented-out normalized return — it had no effect on the result and
// contained a copy-paste typo (`r.width = (int) sample._weight`).
inline float
HaarFtr::compute(const Sample &sample) const
{
  if (sample._ii_imgs.empty())
    abortError(__LINE__, __FILE__, "Integral image not initialized before called compute()");
  float sum = 0.0f;
  for (int k = 0; k < (int) _rects.size(); k++)
  {
    cv::Rect r = _rects[k];
    r.x += sample._col;
    r.y += sample._row;
    sum +=
        _weights[k] * (sample._ii_imgs[_channel](r.y + r.height, r.x + r.width)
            + sample._ii_imgs[_channel](r.y, r.x)
            - sample._ii_imgs[_channel](r.y + r.height, r.x)
            - sample._ii_imgs[_channel](r.y, r.x + r.width)); ///_rsums[k];
  }
  return (float) (sum);
  //return (float) (100*sum/sample._img->sumRect(r,_channel));
}
// Assignment: copy the full feature definition.
// Bug fix: the original skipped _rsums, leaving the target with stale
// per-rectangle normalizers after assignment.
inline HaarFtr&
HaarFtr::operator=(const HaarFtr &a)
{
  _width = a._width;
  _height = a._height;
  _channel = a._channel;
  _weights = a._weights;
  _rects = a._rects;
  _rsums = a._rsums;
  _maxSum = a._maxSum;
  return (*this);
}
// Expected feature response for a uniform patch: each rectangle contributes
// weight * area * 125, where 125 is presumably an assumed mean pixel
// intensity (~mid-gray) — confirm against how callers use this value.
inline float
HaarFtr::expectedValue() const
{
  float sum = 0.0f;
  for (int k = 0; k < (int) _rects.size(); k++)
  {
    sum += _weights[k] * _rects[k].height * _rects[k].width * 125;
  }
  return sum;
}
class ClfWeak;
class ClfStrong;
class ClfAdaBoost;
class ClfMilBoost;
// Base class bundling the parameters shared by all strong classifiers.
class ClfStrongParams
{
public:
  ClfStrongParams()
      :
      _ftrParams(0),
      _weakLearner("stump"),
      _lRate(0.85f),
      _storeFtrHistory(false)
  {
  }
  virtual
  ~ClfStrongParams()
  {
  }
  virtual int
  clfType()=0; // [0] Online AdaBoost (Oza/Grabner) [1] Online StochBoost_LR [2] Online StochBoost_MIL
public:
  FtrParams *_ftrParams;    // feature-generation parameters (not owned here)
  std::string _weakLearner; // "stump" or "wstump"; current code only uses "stump"
  float _lRate; // learning rate for weak learners;
  bool _storeFtrHistory;    // record per-feature history in ClfStrong::_ftrHist
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Abstract strong classifier: owns a feature pool, the subset of selected
// features, and an optional feature history for debugging.
class ClfStrong
{
public:
  ClfStrongParams *_params;    // configuration (not owned here)
  vecFtr _ftrs;                // full feature pool
  vecFtr _selectedFtrs;        // features chosen by the boosting selectors
  cv::Mat_<float> _ftrHist;    // optional per-feature history (see _storeFtrHistory)
  uint _counter;               // update counter
public:
  virtual
  ~ClfStrong()
  {
  }
  int
  nFtrs()
  {
    return _ftrs.size();
  }
  ;
  // abstract functions
  virtual void
  init(ClfStrongParams *params)=0;
  virtual void
  update(SampleSet &posx, SampleSet &negx)=0;
  virtual vectorf
  classify(SampleSet &x, bool logR = true)=0;
  // Factory: build the concrete classifier matching clfparams->clfType().
  static ClfStrong*
  makeClf(ClfStrongParams *clfparams);
  static cv::Mat_<float>
  applyToImage(ClfStrong *clf, const cv::Mat & img, bool logR = true); // returns a probability map (or log odds ratio map if logR=true)
  // Error / likelihood diagnostics over positive & negative scores
  // (implemented below).
  static void
  eval(vectorf ppos, vectorf pneg, float &err, float &fp, float &fn, float thresh = 0.5f);
  static float
  likl(vectorf ppos, vectorf pneg);
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// IMEPLEMENTATIONS - PARAMS
// Parameters for the online AdaBoost classifier: pool size (_numFeat) and
// number of selected weak learners (_numSel).
class ClfAdaBoostParams: public ClfStrongParams
{
public:
  int _numSel, _numFeat;
public:
  ClfAdaBoostParams()
  {
    _numSel = 50;
    _numFeat = 250;
  }
  ;
  virtual int
  clfType()
  {
    return 0;
  }
  ;
};
// Parameters for the online MILBoost classifier: pool size (_numFeat) and
// number of selected weak learners (_numSel).
class ClfMilBoostParams: public ClfStrongParams
{
public:
  int _numFeat, _numSel;
public:
  ClfMilBoostParams()
  {
    _numSel = 50;
    _numFeat = 250;
  }
  ;
  virtual int
  clfType()
  {
    return 1;
  }
  ;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// IMEPLEMENTATIONS - CLF
// Online AdaBoost strong classifier (Oza/Grabner): maintains a weak-learner
// pool, the indices of the selected learners, and a vote weight per selector.
class ClfAdaBoost: public ClfStrong
{
private:
  vectorf _alphas;                // vote weight per selector
  vectori _selectors;             // indices of the selected weak learners
  std::vector<ClfWeak*> _weakclf; // weak-learner pool
  uint _numsamples;
  float _sumAlph;                 // running sum of the alphas
  std::vector<vectorf> _countFPv, _countFNv, _countTPv, _countTNv; //[selector][feature]
  ClfAdaBoostParams *_myParams;
public:
  // Initializer list reordered to match declaration order (_numsamples,
  // _sumAlph, _myParams): members are always initialized in declaration
  // order, so the original list triggered -Wreorder without changing
  // behavior.
  ClfAdaBoost()
      :
      _numsamples(0),
      _sumAlph(0),
      _myParams(0)
  {
  }
  virtual void
  init(ClfStrongParams *params);
  virtual void
  update(SampleSet &posx, SampleSet &negx);
  virtual vectorf
  classify(SampleSet &x, bool logR = true);
};
// Online MILBoost strong classifier: selected weak learners vote additively
// (see ClfMilBoost::classify).
class ClfMilBoost: public ClfStrong
{
private:
  vectori _selectors;             // indices of the selected weak learners
  std::vector<ClfWeak*> _weakclf; // weak-learner pool
  uint _numsamples;
  ClfMilBoostParams *_myParams;
public:
  ClfMilBoost()
      :
      _numsamples(0),
      _myParams(0)
  {
  }
  virtual void
  init(ClfStrongParams *params);
  virtual void
  update(SampleSet &posx, SampleSet &negx);
  virtual vectorf
  classify(SampleSet &x, bool logR = true);
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// WEAK CLF
// Abstract weak classifier built on top of a single feature (_ftr, index
// _ind into the parent's feature pool). Constructors are defined elsewhere.
class ClfWeak
{
public:
  ClfWeak();
  ClfWeak(int id);
  virtual
  ~ClfWeak()
  {
  }
  virtual void
  init()=0;
  // Online update from (optionally weighted) positive/negative sample sets.
  virtual void
  update(SampleSet &posx, SampleSet &negx, const cv::Mat_<float> & posw = cv::Mat_<float>(),
         const cv::Mat_<float> & negw = cv::Mat_<float>())=0;
  // Hard label / real-valued score for sample i of x.
  virtual bool
  classify(SampleSet &x, int i)=0;
  virtual float
  classifyF(SampleSet &x, int i)=0;
  virtual void
  copy(const ClfWeak* c)=0;
  virtual vectorb
  classifySet(SampleSet &x);
  virtual vectorf
  classifySetF(SampleSet &x);
  float
  ftrcompute(const Sample &x)
  {
    return _ftr->compute(x);
  }
  ;
  // Feature value for sample i: use the precomputed table when available,
  // otherwise evaluate the feature directly on the sample.
  float
  getFtrVal(const SampleSet &x, int i)
  {
    return (x.ftrsComputed()) ? x.getFtrVal(i, _ind) : _ftr->compute(x[i]);
  }
  ;
protected:
  bool _trained;       // set by the first update()
  Ftr *_ftr;           // feature this learner thresholds (ownership: presumably the pool — confirm)
  vecFtr *_ftrs;       // parent's feature pool (not owned)
  int _ind;            // index of _ftr in the pool / feature table
  float _lRate;        // learning rate for online updates
  ClfStrong *_parent;  // owning strong classifier
  friend class ClfAdaBoost;
  friend class ClfMilBoost;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Online Gaussian stump: models the feature value of negative (index 0) and
// positive (index 1) samples as two Gaussians updated online, and classifies
// by comparing their log-likelihoods (see the inline definitions below).
class ClfOnlineStump: public ClfWeak
{
public:
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  // members
  float _mu0, _mu1, _sig0, _sig1;  // class-conditional means and variances
  float _q;                        // midpoint (_mu1 - _mu0) / 2
  int _s;                          // sign of (_mu1 - _mu0)
  float _log_n1, _log_n0;          // Gaussian log-normalization terms
  float _e1, _e0;                  // exponent coefficients: -1 / (2 * variance)
public:
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  // functions
  ClfOnlineStump()
      :
      ClfWeak()
  {
    init();
  }
  ;
  ClfOnlineStump(int ind)
      :
      ClfWeak(ind)
  {
    init();
  }
  ;
  virtual void
  init();
  virtual void
  update(SampleSet &posx, SampleSet &negx, const cv::Mat_<float> & posw, const cv::Mat_<float> & negw);
  virtual bool
  classify(SampleSet &x, int i);
  virtual float
  classifyF(SampleSet &x, int i);
  virtual void
  copy(const ClfWeak* c);
};
// Weighted variant of the Gaussian stump: update() incorporates per-sample
// weights when estimating the class-conditional statistics.
class ClfWStump: public ClfWeak
{
public:
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  // members
  float _mu0, _mu1, _sig0, _sig1;  // class-conditional means and variances
  float _q;                        // midpoint term (unused by the visible code)
  int _s;                          // sign term (unused by the visible code)
  float _log_n1, _log_n0;          // Gaussian log-normalization terms
  float _e1, _e0;                  // exponent coefficients: -1 / (2 * variance)
public:
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  // functions
  ClfWStump()
      :
      ClfWeak()
  {
    init();
  }
  ;
  ClfWStump(int ind)
      :
      ClfWeak(ind)
  {
    init();
  }
  ;
  virtual void
  init();
  virtual void
  update(SampleSet &posx, SampleSet &negx, const cv::Mat_<float> & posw, const cv::Mat_<float> & negw);
  // Hard label: positive iff the log-likelihood ratio is positive.
  virtual bool
  classify(SampleSet &x, int i)
  {
    return classifyF(x, i) > 0;
  }
  ;
  virtual float
  classifyF(SampleSet &x, int i);
  virtual void
  copy(const ClfWeak* c);
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Hard-classify every sample in x; element k is classify(x, k).
inline vectorb
ClfWeak::classifySet(SampleSet &x)
{
  const int n = x.size();
  vectorb labels(n);
  for (int i = 0; i < n; ++i)
    labels[i] = classify(x, i);
  return labels;
}
// Real-valued score for every sample in x (element k is classifyF(x, k));
// parallelized over samples with OpenMP when available.
inline vectorf
ClfWeak::classifySetF(SampleSet &x)
{
  vectorf res(x.size());
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int k = 0; k < (int) res.size(); k++)
  {
    res[k] = classifyF(x, k);
  }
  return res;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Online update of the two class-conditional Gaussians from the feature
// values of the positive (posx) and negative (negx) sets. The first call
// initializes means/variances directly from the data; later calls blend old
// and new statistics with learning rate _lRate.
// NOTE: the weight matrices posw/negw are ignored by this (unweighted) stump.
inline void
ClfOnlineStump::update(SampleSet &posx, SampleSet &negx, const cv::Mat_<float> & posw, const cv::Mat_<float> & negw)
{
  float posmu = 0.0, negmu = 0.0;
  if (posx.size() > 0)
    posmu = static_cast<float>(cv::mean(posx.ftrVals(_ind))[0]);
  if (negx.size() > 0)
    negmu = static_cast<float>(cv::mean(negx.ftrVals(_ind))[0]);
  if (_trained)
  {
    if (posx.size() > 0)
    {
      // Exponentially-weighted update of the positive mean and variance.
      _mu1 = (_lRate * _mu1 + (1 - _lRate) * posmu);
      cv::Mat diff = posx.ftrVals(_ind) - _mu1;
      _sig1 = _lRate * _sig1 + (1 - _lRate) * static_cast<float>(cv::mean(diff.mul(diff))[0]);
    }
    if (negx.size() > 0)
    {
      // Exponentially-weighted update of the negative mean and variance.
      _mu0 = (_lRate * _mu0 + (1 - _lRate) * negmu);
      cv::Mat diff = negx.ftrVals(_ind) - _mu0;
      _sig0 = _lRate * _sig0 + (1 - _lRate) * static_cast<float>(cv::mean(diff.mul(diff))[0]);
    }
    // Derived decision values: midpoint, orientation, and the Gaussian
    // log-likelihood terms (min() guards division by zero).
    _q = (_mu1 - _mu0) / 2;
    _s = sign(_mu1-_mu0);
    _log_n0 = std::log(float(1.0f / pow(_sig0, 0.5f)));
    _log_n1 = std::log(float(1.0f / pow(_sig1, 0.5f)));
    //_e1 = -1.0f/(2.0f*_sig1+1e-99f);
    //_e0 = -1.0f/(2.0f*_sig0+1e-99f);
    _e1 = -1.0f / (2.0f * _sig1 + std::numeric_limits<float>::min());
    _e0 = -1.0f / (2.0f * _sig0 + std::numeric_limits<float>::min());
  }
  else
  {
    // First update: initialize directly from the sample statistics
    // (1e-9 keeps the variances strictly positive).
    _trained = true;
    if (posx.size() > 0)
    {
      _mu1 = posmu;
      cv::Scalar scal_mean, scal_std_dev;
      cv::meanStdDev(posx.ftrVals(_ind), scal_mean, scal_std_dev);
      _sig1 = static_cast<float>(scal_std_dev[0] * scal_std_dev[0]) + 1e-9f;
    }
    if (negx.size() > 0)
    {
      _mu0 = negmu;
      cv::Scalar scal_mean, scal_std_dev;
      cv::meanStdDev(negx.ftrVals(_ind), scal_mean, scal_std_dev);
      _sig0 = static_cast<float>(scal_std_dev[0] * scal_std_dev[0]) + 1e-9f;
    }
    _q = (_mu1 - _mu0) / 2;
    _s = sign(_mu1-_mu0);
    _log_n0 = std::log(float(1.0f / pow(_sig0, 0.5f)));
    _log_n1 = std::log(float(1.0f / pow(_sig1, 0.5f)));
    //_e1 = -1.0f/(2.0f*_sig1+1e-99f);
    //_e0 = -1.0f/(2.0f*_sig0+1e-99f);
    _e1 = -1.0f / (2.0f * _sig1 + std::numeric_limits<float>::min());
    _e0 = -1.0f / (2.0f * _sig0 + std::numeric_limits<float>::min());
  }
}
// Hard label for sample i: true when the positive-class Gaussian yields the
// higher log-likelihood for the sample's feature value.
inline bool
ClfOnlineStump::classify(SampleSet &x, int i)
{
  const float v = getFtrVal(x, i);
  const double neg_ll = (v - _mu0) * (v - _mu0) * _e0 + _log_n0;
  const double pos_ll = (v - _mu1) * (v - _mu1) * _e1 + _log_n1;
  return pos_ll > neg_ll;
}
// Real-valued score for sample i: log-likelihood ratio (positive minus
// negative) of the sample's feature value under the two Gaussians.
inline float
ClfOnlineStump::classifyF(SampleSet &x, int i)
{
  const float v = getFtrVal(x, i);
  const double neg_ll = (v - _mu0) * (v - _mu0) * _e0 + _log_n0;
  const double pos_ll = (v - _mu1) * (v - _mu1) * _e1 + _log_n1;
  return float(pos_ll - neg_ll);
}
inline void
ClfOnlineStump::copy(const ClfWeak* c)
{
ClfOnlineStump *cc = (ClfOnlineStump*) c;
_mu0 = cc->_mu0;
_mu1 = cc->_mu1;
_sig0 = cc->_sig0;
_sig1 = cc->_sig1;
_lRate = cc->_lRate;
_e0 = cc->_e0;
_e1 = cc->_e1;
_log_n0 = cc->_log_n0;
_log_n1 = cc->_log_n1;
return;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Weighted online update of the class-conditional Gaussians: sample weights
// posw/negw are normalized to sum to 1 and folded into the feature values
// before the mean/variance estimates. First call initializes from the data;
// later calls blend with learning rate _lRate.
inline void
ClfWStump::update(SampleSet &posx, SampleSet &negx, const cv::Mat_<float> & posw, const cv::Mat_<float> & negw)
{
  cv::Mat_<float> poswn, negwn;
  if ((posx.size() != posw.size().area()) || (negx.size() != negw.size().area()))
    abortError(__LINE__, __FILE__, "ClfWStump::update - number of samples and number of weights mismatch");
  float posmu = 0.0, negmu = 0.0;
  if (posx.size() > 0)
  {
    // Normalize weights (1e-6 guards an all-zero weight vector).
    poswn = posw / (cv::sum(posw)[0] + 1e-6);
    posmu = static_cast<float>(cv::mean(posx.ftrVals(_ind).mul(poswn))[0]);
  }
  if (negx.size() > 0)
  {
    negwn = negw / (cv::sum(negw)[0] + 1e-6);
    negmu = static_cast<float>(cv::mean(negx.ftrVals(_ind).mul(negwn))[0]);
  }
  if (_trained)
  {
    if (posx.size() > 0)
    {
      // Exponentially-weighted update of the positive statistics.
      _mu1 = (_lRate * _mu1 + (1 - _lRate) * posmu);
      cv::Scalar scal_mean, scal_std_dev;
      cv::meanStdDev(posx.ftrVals(_ind).mul(poswn), scal_mean, scal_std_dev);
      _sig1 = static_cast<float>(_lRate * _sig1 + (1 - _lRate) * scal_std_dev[0] * scal_std_dev[0]);
    }
    if (negx.size() > 0)
    {
      _mu0 = (_lRate * _mu0 + (1 - _lRate) * negmu);
      cv::Scalar scal_mean, scal_std_dev;
      cv::meanStdDev(negx.ftrVals(_ind).mul(negwn), scal_mean, scal_std_dev);
      _sig0 = static_cast<float>(_lRate * _sig0 + (1 - _lRate) * scal_std_dev[0] * scal_std_dev[0]);
    }
  }
  else
  {
    // First update: initialize directly from the weighted statistics
    // (1e-9 keeps the variances strictly positive).
    _trained = true;
    _mu1 = posmu;
    _mu0 = negmu;
    cv::Scalar scal_mean, scal_std_dev;
    if (negx.size() > 0)
    {
      cv::meanStdDev(negx.ftrVals(_ind).mul(negwn), scal_mean, scal_std_dev);
      _sig0 = static_cast<float>(scal_std_dev[0] * scal_std_dev[0]) + 1e-9f;
    }
    if (posx.size() > 0)
    {
      cv::meanStdDev(posx.ftrVals(_ind).mul(poswn), scal_mean, scal_std_dev);
      _sig1 = static_cast<float>(scal_std_dev[0] * scal_std_dev[0]) + 1e-9f;
    }
  }
  // Refresh the Gaussian log-likelihood terms used by classifyF().
  _log_n0 = std::log(float(1.0f / pow(_sig0, 0.5f)));
  _log_n1 = std::log(float(1.0f / pow(_sig1, 0.5f)));
  _e1 = -1.0f / (2.0f * _sig1);
  _e0 = -1.0f / (2.0f * _sig0);
}
// Real-valued score for sample i: log-likelihood ratio (positive minus
// negative) of the sample's feature value under the two Gaussians.
inline float
ClfWStump::classifyF(SampleSet &x, int i)
{
  const float v = getFtrVal(x, i);
  const double neg_ll = (v - _mu0) * (v - _mu0) * _e0 + _log_n0;
  const double pos_ll = (v - _mu1) * (v - _mu1) * _e1 + _log_n1;
  return (float) (pos_ll - neg_ll);
}
inline void
ClfWStump::copy(const ClfWeak* c)
{
ClfWStump *cc = (ClfWStump*) c;
_mu0 = cc->_mu0;
_mu1 = cc->_mu1;
return;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// AdaBoost response per sample: sum of +/- alpha votes from each selected
// weak learner. Returns the raw margin (log-odds-like) when logR is true,
// otherwise probabilities via sigmoid(2 * margin).
inline vectorf
ClfAdaBoost::classify(SampleSet &x, bool logR)
{
  int numsamples = x.size();
  vectorf res(numsamples);
  vectorb tr;
  // for each selector, accumulate votes in the res vector
  for (int sel = 0; sel < (int) _selectors.size(); sel++)
  {
    tr = _weakclf[_selectors[sel]]->classifySet(x);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int j = 0; j < numsamples; j++)
    {
      res[j] += tr[j] ? _alphas[sel] : -_alphas[sel];
    }
  }
  // return probabilities or log odds ratio
  if (!logR)
  {
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int j = 0; j < (int) res.size(); j++)
    {
      res[j] = sigmoid(2 * res[j]);
    }
  }
  return res;
}
// MILBoost response per sample: unweighted sum of the selected weak
// learners' real-valued scores. Returns the raw sum when logR is true,
// otherwise probabilities via the sigmoid.
inline vectorf
ClfMilBoost::classify(SampleSet &x, bool logR)
{
  int numsamples = x.size();
  vectorf res(numsamples);
  vectorf tr;
  for (uint w = 0; w < _selectors.size(); w++)
  {
    tr = _weakclf[_selectors[w]]->classifySetF(x);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int j = 0; j < numsamples; j++)
    {
      res[j] += tr[j];
    }
  }
  // return probabilities or log odds ratio
  if (!logR)
  {
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int j = 0; j < (int) res.size(); j++)
    {
      res[j] = sigmoid(res[j]);
    }
  }
  return res;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Error statistics at the given threshold: fn = fraction of positive scores
// below thresh, fp = fraction of negative scores at/above thresh, and
// err = their unweighted mean.
// Improvements: the `cond ? x++ : x;` expressions-as-statements are plain
// ifs, and empty score vectors now yield 0 instead of NaN from 0/0.
inline void
ClfStrong::eval(vectorf ppos, vectorf pneg, float &err, float &fp, float &fn, float thresh)
{
  fp = 0;
  fn = 0;
  for (uint k = 0; k < ppos.size(); k++)
    if (ppos[k] < thresh)
      fn++;
  for (uint k = 0; k < pneg.size(); k++)
    if (pneg[k] >= thresh)
      fp++;
  if (!ppos.empty())
    fn /= ppos.size();
  if (!pneg.empty())
    fp /= pneg.size();
  err = 0.5f * fp + 0.5f * fn;
}
// Mean log-likelihood of the scores: average log(p) over positives plus
// average log(1 - p) over negatives (1e-5 guards log(0)).
inline float
ClfStrong::likl(vectorf ppos, vectorf pneg)
{
  const float wpos = 1.0f / ppos.size();
  const float wneg = 1.0f / pneg.size();
  float total = 0;
  for (uint i = 0; i < ppos.size(); i++)
    total += log(ppos[i] + 1e-5f) * wpos;
  for (uint i = 0; i < pneg.size(); i++)
    total += log(1 - pneg[i] + 1e-5f) * wneg;
  return total;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Options shared by all trackers: display/output settings plus the radii
// and counts used when harvesting training samples.
class TrackerParams
{
public:
  TrackerParams();
  vectori _boxcolor; // for outputting video
  uint _lineWidth; // line width
  uint _negnumtrain, _init_negnumtrain; // # negative samples to use during training, and init
  float _posradtrain, _init_postrainrad; // radius for gathering positive instances
  uint _posmaxtrain; // max # of pos to train with
  bool _debugv; // displays response map during tracking [kinda slow, but help in debugging]
  vectorf _initstate; // [x,y,scale,orientation] - note, scale and orientation currently not used
  bool _useLogR; // use log ratio instead of probabilities (tends to work much better)
  bool _initWithFace; // initialize with the OpenCV tracker rather than _initstate
  bool _disp; // display video with tracker state (colored box)
  std::string _vidsave; // filename - save video with tracking box
  std::string _trsave; // filename - save file containing the coordinates of the box (txt file with [x y width height] per row)
};
// Parameters specific to SimpleTracker: search-window size and the strategy
// for drawing negative samples.
class SimpleTrackerParams: public TrackerParams
{
public:
  SimpleTrackerParams();
  uint _srchwinsz; // size of search window
  uint _negsamplestrat; // [0] all over image [1 - default] close to the search window
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tracking-by-detection driver: an online strong classifier (_clf) is
// updated each frame and evaluated around the previous object state.
class SimpleTracker
{
public:
  SimpleTracker()
      :
      _cnt(0)
  {
  }
  ~SimpleTracker()
  {
  }
  double
  track_frame(const cv::Mat & frame); // track object in a frame; requires init() to have been called.
  bool
  init(const cv::Mat & frame, SimpleTrackerParams p, ClfStrongParams *clfparams);
  const cv::Mat_<float> &
  getFtrHist() const
  {
    return _clf->_ftrHist;
  } // only works if _clf->_storeFtrHistory is set to true.. mostly for debugging
  // Fill roi with the current track box; _curState is [x, y, width, height].
  inline void
  getTrackBox(cv::Rect & roi)
  {
    roi.width = cvRound(_curState[2]);
    roi.height = cvRound(_curState[3]);
    roi.x = cvRound(_curState[0]);
    roi.y = cvRound(_curState[1]);
  }
private:
  cv::Ptr<ClfStrong> _clf;             // online strong classifier
  vectorf _curState;                   // current state [x, y, width, height]
  SimpleTrackerParams _trparams;       // tracker options
  cv::Ptr<ClfStrongParams> _clfparams; // classifier options
  int _cnt;                            // frame counter
};
} // namespace mil
} // namespace mtf
#endif // #ifndef MTF_ONLINE_MIL_H
/* End of file. */
|
solve.c | #include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <inttypes.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
#define N_MAX_MOVES 255 // max solution moves (using uint8_t, 0 - zero moves, 255 - no moves)
#define N_MAX_MS_MOVES 31 // max move-set moves (+1 to store length)
#define N_MOVES_SPECIAL 16 // for each move store special moves like reverse, opposite, independent
#define MS_HASH_SIZE 0x80000000 // move-set hash size
#define MS_HASH_MASK (MS_HASH_SIZE-1) // and mask
#define MS_MAX_TABLE_SIZE 0x4000000 // max number of move-sets ~67 mln
#include "data.h" // generated by psolve.py
// Cube string and moves not allowed after previous move (3 maximum)
// Cube state: c is the sticker string; m0,m1,m2 are the up-to-3 moves not
// allowed after the previous move (-1/255 = no restriction).
typedef struct { char c[N_BLOCKS]; uint8_t m0,m1,m2; } CUBE;
// Global variables for better performance & optimization
int step, inSeq, linkStep, linkSol, solutionFound // current step, sequence flag, linked step/solution, stop flag
    , maxDepth, maxWeight, bestWeight, newBestWeight // max search depth and weight, best weight values
    , bestSolLen, bestSolWeight, bestStep, bestStepSol // sequence of steps best values
    , nStepSol[N_STEPS]={0}, stepSolWeight[N_STEPS][N_MAX_SOL] // number of solutions and solutions weights
    , moveLen[N_STEPS]= {0}, nMoveLen= 0; // profile steps (sequence of steps) length
// Move arrays use index 0 as the count; the moves themselves sit at 1..count.
uint8_t *amoves0, *amoves1 // allowed moves for the current step and step's last move
    , stepSol[N_STEPS][N_MAX_SOL][N_MAX_MOVES+1] // list of solutions for the step ([0] = length)
    , moveList[N_MAX_MOVES], nMoves= 0; // main list of moves
char revColor[256]= {}, normColor[256]= {}; // reverse colors (F to B, U to D, R to L) and normal (face index)
CUBE cube0; // start cube to solve
// Timer processing in milliseconds & screen printing functions
//
uint64_t solTimeStart= 0, stepTimeStart= 0, seqTimeStart= 0 // start time for step's solution, step and sequence
    , moveTime[N_STEPS]= {0}, logStartTimer= 0, logLastTimer= 0; // profile step time; log & progress times
char logbuf[1024]= {0}, stepTimeInfo[N_STEPS][256]= {{0}}; // string buffers for logger
// Current wall-clock time in milliseconds since the Unix epoch.
static inline uint64_t timems() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (uint64_t)tv.tv_sec * 1000u + (uint64_t)(tv.tv_usec / 1000);
}
// Print at the beginning of the line with elapsed time, printf-style args.
// The formatted text is kept in the global logbuf so printprogress() can
// re-display it. The screen refreshes at most every 200ms unless fmt ends
// with '\n' or starts with '@' (forced update). fmt must be non-empty.
static inline void printlog(char *fmt, ...) {
    va_list ap; va_start(ap, fmt); // variable arguments processing
    uint64_t t= timems();
    if (!logStartTimer) { logStartTimer= t; logLastTimer= 0; } // initialize timer
    vsnprintf(logbuf, sizeof(logbuf), fmt, ap); // bounded: plain vsprintf could overflow logbuf
    va_end(ap); // required pair for va_start (was missing)
    if (t-logLastTimer>200 || fmt[strlen(fmt)-1]=='\n' || fmt[0]=='@') { // update after 200ms or has \n at the end
        int ms= t-logStartTimer, tm=ms/60/1000, ts=ms/1000%60, tms=ms%1000; // minutes, seconds and milliseconds
        printf("\33[2K\r%02d:%02d.%03d %s", tm, ts, tms, logbuf); // clear to the start of the line, show time
        logLastTimer= t;
    }
}
// Append a printf-style progress status after the saved logbuf text.
// Rate-limited to one update per 200ms; the check is repeated inside the
// critical section because logLastTimer can be changed by another thread.
static inline void printprogress(char *fmt, ...) {
    if (timems()-logLastTimer>200) // update after 200ms
    #pragma omp critical // outside critical logLastTimer can be changed by other thread
    if (timems()-logLastTimer>200) {
        va_list ap; va_start(ap, fmt);
        // Copy logbuf first: printlog(logbuf) made printlog vsprintf logbuf
        // onto itself (overlapping src/dst is UB) and re-interpreted any '%'
        // in the saved text as format specifiers.
        char saved[sizeof(logbuf)]; memcpy(saved, logbuf, sizeof(saved));
        printlog("%s", saved); // re-show saved log line (200ms condition already true, so it prints)
        vprintf(fmt, ap); // then append the progress status
        va_end(ap); // required pair for va_start (was missing)
    }
}
// Test the time limits for the current step; returns 1 when the search must
// stop, 0 otherwise. softCond enables the soft per-solution limit (callers
// pass e.g. "a solution already exists"). The reason is recorded once in
// stepTimeInfo[step] (inside a critical section - several threads can trip
// the limit concurrently) and shown later by doPruneStep.
static inline int timeLimTest(int softCond) { // test time limit for the step, softCond - soft timer condition flag
    uint64_t t= timems();
    if (stepTime[step] && t>stepTimeStart+stepTime[step]) { // step time is over
        #pragma omp critical
        if (!stepTimeInfo[step][0]) sprintf(stepTimeInfo[step], "STEP HARD timer stop\n");
        return 1;
    }
    if (solTime[step] && t>solTimeStart+solTime[step] && (solTimeHard[step] || softCond)) { // hard or soft limit
        #pragma omp critical
        if (!stepTimeInfo[step][0] && linkStep==-1)
            sprintf(stepTimeInfo[step], "SOLUTION %s timer stop\n", solTimeHard[step]?"HARD":"SOFT");
        return 1;
    }
    return 0;
}
// Print the error text, persist it to the .solution file (read by the
// caller) and terminate with a failure code.
void error(char *txt) { // write error to file
    printf("%s\n", txt);
    FILE *f= fopen(".solution","w");
    if (f) { fprintf(f,"%s",txt); fclose(f); } // original dereferenced f without checking fopen() success
    exit(-1);
}
// Operations with the cube - move, turn, check that it is solved, get weight and print it
//
// Move name strings, indexed by move number (generated by psolve.py).
char *moveNames[N_MOVES]= { // name of moves
#include "movenames.h"
};
// Related-move table: [0]=reverse, [4]=opposite, [5]=reverse opposite (used
// by processTmpMove); [2] is used with [0] to detect redundant move pairs.
int moveSpecial[N_MOVES][N_MOVES_SPECIAL]= { // special moves reverse, skip1, skip2, opposite, reverse opposite, ...
#include "specmoves.h"
};
// Apply one move to cube Ca, writing the result to Cb; the generated moves.h
// switch permutes the sticker string and sets Cb's m0,m1,m2 forbidden moves.
// Returns Cb for call chaining.
static inline CUBE *moveCube(CUBE *Ca, CUBE *Cb, int move) { // move cube Ca to Cb with the move; set m0,m1,m2 for Cb
    char *a= Ca->c, *b= Cb->c;
    memcpy(b, a, N_BLOCKS);
    switch(move) {
#include "moves.h"
    }
    return Cb;
}
// Apply whole-cube turn `turn` to Ca and write the result to Cb (the
// generated turns.h switch permutes the sticker string). Used for
// prune-table symmetry expansion. Returns Cb.
static inline CUBE *turnCube(CUBE *Ca, CUBE *Cb, int turn) { // turn cube Ca to Cb with the turn
    char *a= Ca->c, *b= Cb->c;
    memcpy(b,a,N_BLOCKS);
    switch(turn) {
#include "turns.h"
    }
    return Cb;
}
// Apply `move` to cube *C in place. Only the sticker string is copied back,
// so the forbidden-move markers m0,m1,m2 of *C are left untouched.
static inline void moveCubeSelf(CUBE *C, int move) {
    CUBE scratch;
    moveCube(C, &scratch, move);
    memcpy(C->c, scratch.c, N_BLOCKS);
}
// Return non-zero when the cube satisfies the current step's solved
// condition (generated solved.h switch over `step`); 0 otherwise.
static inline int isSolved(CUBE *pC) { // check if the cube is solved for the step
    char *a= pC->c;
    switch(step){
#include "solved.h"
    }
    return 0;
}
// Return the cube's weight for the current step (generated weight.h switch
// over `step`); 0 for steps without a weight function.
static inline int cubeWeight(CUBE *pC) { // get cube's weight for the step
    char *a= pC->c;
    switch(step){
#include "weight.h"
    }
    return 0;
}
// Print the raw cube string on one line. For the "cube333gear" puzzle the
// edge rotation state is encoded at offsets +100/+200 of the sticker index,
// so edges print with ' or " suffixes; all other puzzles print the first
// N_BLOCKS characters as-is.
void printCubeLine(CUBE *pC) { // print the cube string
    char *s= pC->c;
    if (!strcmp(ALG_NAME,"cube333gear")) { // cube 333 gear special processing
        int i, j, edges[24]= {1,3,5,7,37,39,41,43,10,12,14,16,19,21,23,25,46,48,50,52,28,30,32,34};
        for(i=0; i<=53; i++) {
            for(j=0; j<24; j++) if (edges[j]==i) { // an edge item
                if (s[100+i]) printf("%c'",s[100+i]);
                else if (s[200+i]) printf("%c\"",s[200+i]);
                else if (s[i]) printf("%c",s[i]);
                break;
            }
            if (j==24) printf("%c",s[i]); // not edge - center or corner
        }
        printf("\n");
    }
    else printf("%.*s\n", N_BLOCKS, s);
}
// Print the cube string and, for rubiks NxNxN puzzles, a colored unfolded
// scheme (3x4 grid of faces) using ANSI background colors.
void printCube(CUBE *pC) { // print the cube string and colored scheme for rubiks NxNxN
    printCubeLine(pC);
    char *s= pC->c, cStr[256][8]= {{0}};
    for(int i=0; i<256; i++) sprintf(cStr[i], "%c", (char)i); // default: print the character itself
    // color letters -> ANSI background escapes (some letters alias one color)
    strcpy(cStr[(int)'r'], "\033[0;41m"); strcpy(cStr[(int)'g'], "\033[0;42m"); strcpy(cStr[(int)'c'], "\033[0;42m");
    strcpy(cStr[(int)'y'], "\033[0;43m"); strcpy(cStr[(int)'b'], "\033[0;44m"); strcpy(cStr[(int)'a'], "\033[0;44m");
    strcpy(cStr[(int)'o'], "\033[1;45m"); strcpy(cStr[(int)'p'], "\033[1;45m"); strcpy(cStr[(int)'w'], "\033[0;47m");
    strcpy(cStr[(int)'d'], "\033[0;47m"); strcpy(cStr[(int)'s'], "\033[0;47m");
    int sz= 0; // cube dimension; 0 = no colored scheme for this puzzle
    if (!strcmp(ALG_NAME,"cube555")) sz= 5;
    else if (!strcmp(ALG_NAME,"cube444")) sz= 4;
    else if (!strcmp(ALG_NAME,"cube333")) sz= 3;
    else if (!strcmp(ALG_NAME,"cube222")) sz= 2;
    if (sz>0)
    for(int i=0; i<sz*3; i++) {
        for(int j=0; j<sz*4;j ++) {
            int f=-1; // face index 0..5; -1 = empty area of the unfolded layout
            if (i<sz && j/sz==1) f=0;
            else if (i>=sz*2 && j/sz==1) f=5;
            else if (j<sz && i/sz==1) f=4;
            else if (j>=sz && i/sz==1) f= (j-sz)/sz+1;
            if (j%sz==0) printf(" "); // gap between faces
            if (f>=0) { // read the sticker only for a real face: f==-1 previously made s[] index negative (out-of-bounds read)
                char c= s[ f*sz*sz + (i%sz*sz) + (j%sz) ];
                printf("%s%s\033[0m", cStr[(int)c], cStr[(int)c][0]==033?" ":" ");
            }
            else printf(" ");
        }
        printf("\n");
    }
}
// Prune table storage
// 1st dimension - prune table by index
// 2nd - array with hash as index and minimum number of moves as value
// using uint32_t instead of size_t, so maximum hash size is 32 bit
//
uint8_t **pruneTable; // per prune table: hash -> minimum number of moves (0 = unknown/empty)
uint32_t *pruneCnt; // number of filled entries per prune table (for fill statistics)
// Hash the (optionally masked) cube string for prune table `prunei` of the
// current step. With a non-empty mask only the selected sticker positions
// are used; colors are normalized through normColor first. Uses the 65599
// multiplicative hash, masked to the table size (a power of two per .cr).
static inline uint32_t hashFromMask(char *s0, int prunei) { // called from prune.h, useMask flag in .cr file
    int n; // string length (mask or full)
    char s[N_BLOCKS]; // the packed cube string
    uint32_t hash= 0;
    if ((n= nStepMasks[step][prunei])) // mask is not empty - repack
        for(int j=0; j<n; j++) s[j]= normColor[s0[ stepMasks[step][prunei][j] ]];
    else // mask is empty - use full cube length and string
        for(int j=0; j<(n=N_BLOCKS); j++) s[j]= normColor[s0[j]];
    for(int i=0; i<n; i++) hash= s[i] + hash*65599u; // fast hash function
    return hash & (stepPruneSize[step][prunei]-1); // use bit mask length defined .cr file
}
// Compute the prune-table hash of the cube for the current step and prune
// table index, dispatching through the generated prune.h switch (which calls
// hashFromMask above). Returns 0 for steps without prune code.
static inline uint32_t hashCube(CUBE *pC, int prunei) { // calc cube's hash for the step and prune table
    char *a= pC->c;
    switch(step) {
#include "prune.h"
    }
    return 0;
}
// Record that position *pC is reachable in `depth` moves: for every prune
// table of the step store min(existing, depth) at the position's hash. With
// symmetry enabled the same is done for every whole-cube turn of *pC. The
// slot is re-checked inside the critical section because another thread may
// have written it between the unlocked test and the lock.
static inline void addPruneTableValue(CUBE *pC, int depth) { // add depth value to the prune table
    for(int j=0; j<nStepPrunes[step]; j++) { // iterate steps' prune tables
        uint32_t p= hashCube(pC, j); // calculate prune hash value
        uint8_t pd= pruneTable[j][p]; // current table depth value (0 = empty)
        if (!pd || depth<pd) // empty or better depth
            #pragma omp critical // re-check since could be changed by other threads
            if (!pruneTable[j][p] || depth<pruneTable[j][p]) { pruneTable[j][p]= depth; pruneCnt[j]++; }
        // NOTE(review): pruneCnt also increments when a non-empty slot is
        // overwritten with a smaller depth, so the fill % stat can overcount.
        if (nStepPruneSymmetry[step][j]) { // use symmetry => add all possible turns
            CUBE C1;
            for(int i=0; i<N_TURNS; i++) {
                turnCube(pC, &C1, i);
                p= hashCube(&C1, j); pd= pruneTable[j][p];
                if (!pd || depth<pd) // empty or better depth
                    #pragma omp critical // re-check since could be changed by other threads
                    if (!pruneTable[j][p] || depth<pruneTable[j][p]) { pruneTable[j][p]= depth; pruneCnt[j]++; }
            }
        }
    }
}
// Depth-first expansion for prune table building: apply every allowed move
// not forbidden by the previous move (m0..m2), record each new position at
// depth+1, and recurse until maxDepth is reached.
void buildPruneNext(CUBE *pC, int depth) { // recursive iterations to build prune table till maxDepth
    CUBE C1;
    for(int i=1; i<=amoves0[0]; i++) { // all allowed moves (amoves0[0] = count)
        int m= amoves0[i]; if (m!=pC->m0 && m!=pC->m1 && m!=pC->m2) { // the move is allowed
            moveCube(pC, &C1, m); addPruneTableValue(&C1, depth+1);
            if (depth<maxDepth) buildPruneNext(&C1, depth+1);
        }
    }
}
// Store a found solution for the current step. In max-weight mode only
// solutions improving bestWeight are kept. In linked mode the linked step's
// move list is prepended. solutionFound is raised when the target weight,
// the per-step solutions limit, or (sequence mode, no weight) the first
// solution is reached. Callers invoke this inside an omp critical section.
static inline void addStepSolution(CUBE *pC, uint8_t *mvList, int depth) { // add solution to the step
    int addSolution= 1, w= (maxWeight ? cubeWeight(pC) : -1);
    if (w>=0) { // max weight mode
        if (w>bestWeight) { bestWeight= w; solutionFound= (solutionFound || w>=maxWeight); } // better weight
        else addSolution= 0; // skip
    }
    if (addSolution) {
        int ind=1, sol= nStepSol[step]; // stepSol[...][0] holds the length; moves start at index 1
        if (linkStep>=0) { // copy start of move list from linked step
            ind+= stepSol[linkStep][linkSol][0];
            memcpy(stepSol[step][sol]+1, stepSol[linkStep][linkSol]+1, ind-1);
        }
        memcpy(stepSol[step][sol]+ind, mvList, depth+1); // save the solution
        stepSol[step][sol][0]= ind+depth; stepSolWeight[step][sol]= w; // with length & weight
        if (linkStep==-1) { // in single step increase solutions counter till limit
            if (++nStepSol[step]==stepMaxSol[step]) solutionFound= 1;
        }
        else if (!maxWeight) solutionFound= 1; // in sequence mode finish after first solution
    }
}
// One layer of iterative-deepening search. Below the last layer: try each
// allowed move, consult the prune tables near the horizon (a stored distance
// that cannot fit into maxDepth cuts the branch; 0 means "deeper than the
// table", also cut) and recurse. On the last layer: use the step's last-move
// set (amoves1) and test the solved condition.
void tryLayerNext(CUBE *pC, int depth, uint8_t *mvList) { // search layer iteration to maxDepth with depth
    CUBE C1;
    if (depth<maxDepth) // outer if like this works faster
        for(int i=1; i<=amoves0[0]; i++) { if (solutionFound) return; // all allowed moves stop when solution found
            uint8_t m= amoves0[i]; if (m!=pC->m0 && m!=pC->m1 && m!=pC->m2) { // the move is allowed
                moveCube(pC, &C1, mvList[depth]= m); // make & save the move
                int deep= depth<maxDepth-stepHashDepth[step]-1; // should go deeper flag
                if (!deep) { // close to the horizon - check the prune tables
                    deep= 1;
                    for(int j=0; j<nStepPrunes[step]; j++) { // iterate prune tables and check min depth
                        uint8_t pd= pruneTable[j][ hashCube(&C1, j) ];
                        if (!pd || depth+pd>maxDepth) { deep= 0; break; }
                    }
                }
                if (deep) tryLayerNext(&C1, depth+1, mvList); // go to the next layer
            }
        }
    else // on the last layer check if the cube could be solved
        for(int i=1; i<=amoves1[0]; i++) { if (solutionFound) return; // all allowed moves stop when solution found
            uint8_t m= amoves1[i]; if (m!=pC->m0 && m!=pC->m1 && m!=pC->m2) { // the move is allowed
                moveCube(pC, &C1, mvList[depth]= m); // make & save the move
                if (isSolved( &C1 )) // we have the solution !!! => save it
                    #pragma omp critical
                    addStepSolution(&C1, mvList, depth);
            }
        }
}
// Entry point for both the search (buildPrune=0) and prune-table building
// (buildPrune=1). For maxDepth>1 the first two moves are expanded into an
// OpenMP-parallel collapsed loop; each thread keeps its own move list.
// Returns the solutionFound flag.
int pruneTableMain(CUBE *pC, int buildPrune) { // start the search or build prune table, use omp parallel
    int showPercentCounter= 0; // progress only; bumped by many threads without atomics (approximate)
    solutionFound= 0;
    if (maxDepth>1) { // 2 and more layers
        uint8_t *fmoves= buildPrune ? amoves1 : amoves0; // in building mode use last move as first one
        #pragma omp parallel for schedule(dynamic,1) collapse(2) // expand two first moves
        for(int i1=1; i1<=fmoves[0]; i1++) for(int i2=1; i2<=amoves0[0]; i2++) { if (solutionFound) continue;
            int tlcond= linkStep==-1 ? nStepSol[step] : stepSol[step][nStepSol[step]][0]<N_MAX_MOVES;
            if (!buildPrune && timeLimTest(tlcond)) { solutionFound= 1; continue; } // stop conditions
            printprogress("%.1lf%%", 100.0 * ++showPercentCounter / fmoves[0] / amoves0[0]);
            uint8_t mvList[N_MAX_MOVES]; // move list array for each thread
            uint8_t m1= fmoves[i1]; CUBE C1; moveCube(pC, &C1, mvList[0]= m1); // make & save move1
            if (buildPrune) addPruneTableValue(&C1, 1); // build for 1st move
            uint8_t m2= amoves0[i2]; if (m2!=C1.m0 && m2!=C1.m1 && m2!=C1.m2) { // move2 could be done after move1
                CUBE C2; moveCube(&C1, &C2, mvList[1]= m2); // make & save move2
                // NOTE(review): C2 is two moves from the start yet recorded at
                // depth 1 - looks like it should be 2. A too-small stored depth
                // only weakens pruning, so search results stay correct. Confirm.
                if (buildPrune) { addPruneTableValue(&C2, 1); buildPruneNext(&C2, 2); } // build prune table
                else tryLayerNext(&C2, 2, mvList); // start the search
            }
        }
    }else{ // 0 or 1 layer
        uint8_t mvList[N_MAX_MOVES];
        if (buildPrune) buildPruneNext(pC, 0); // build prune table
        else tryLayerNext(pC, 0, mvList); // start the search
    }
    printprogress("100%%");
    return solutionFound;
}
// Prepare prune tables for the current step: allocate them, then load each
// table from its cache file (data/<alg>.<md5>, sparse hash/depth pairs). If
// any cache file is missing, rebuild all tables from the zero (solved) cube
// up to stepHashDepth moves and write the caches back out.
void initPrune() { // initialize prune table - prepare memory and try to read from cache
    if (stepHashDepth[step] && nStepPrunes[step]) {
        char fn[N_MAX_PRUNES][256], buf[256], buf2[32];
        pruneTable= calloc(nStepPrunes[step], sizeof(uint8_t*));
        pruneCnt= calloc(nStepPrunes[step], sizeof(uint32_t));
        if (!pruneTable || !pruneCnt) error("initPrune. Not enough memory");
        for(int i=0; i<nStepPrunes[step]; i++) {
            pruneTable[i]= calloc(stepPruneSize[step][i], sizeof(uint8_t));
            if (!pruneTable[i]) error("initPrune. Not enough memory");
        }
        int fileFail= 0;
        for(int i=0; i<nStepPrunes[step]; i++) {
            sprintf(fn[i], "data/%s.%s", ALG_NAME, stepMd5[step][i]);
            FILE *f= fopen(fn[i],"r");
            if (f) { // cache format: repeated (uint32 hash, uint8 depth) pairs
                uint32_t p=0; uint8_t n=0;
                while( fread_unlocked(&p,sizeof(uint32_t),1,f) && fread_unlocked(&n,sizeof(uint8_t),1,f) ) {
                    pruneTable[i][p]= n; pruneCnt[i]++;
                }
                fclose(f);
            }
            else fileFail= 1;
        }
        if (fileFail) { // if no cache file => build and save prune table from zero cube
            maxDepth= stepHashDepth[step];
            CUBE zero= { m0:-1, m1:-1, m2:-1 }; memcpy(zero.c, zeroCube, strlen(zeroCube));
            printlog("building prune tables "); logLastTimer= 0; pruneTableMain(&zero, 1);
            for(int i=0; i<nStepPrunes[step]; i++) { // write prune table to cache, binary format for faster reading
                FILE *f= fopen(fn[i],"w");
                if (!f) { printlog("cannot write cache file %s\n", fn[i]); continue; } // original wrote through a NULL FILE*
                for(uint32_t j=0; j<stepPruneSize[step][i]; j++) if (pruneTable[i][j]) {
                    fwrite(&j, sizeof(uint32_t), 1, f); fwrite(&pruneTable[i][j], sizeof(uint8_t), 1, f);
                }
                fclose(f);
            }
            stepTimeStart= timems(); // restart step timer
        }
        *buf= 0;
        for(int i=0; i<nStepPrunes[step]; i++) { // per-table size / fill statistics
            float sz= stepPruneSize[step][i]/1048576.0, pr= 100.0*pruneCnt[i]/stepPruneSize[step][i];
            sprintf(buf2, " | %.1fmb/%.2f%%", sz, pr); strcat(buf, buf2);
        }
        printlog("nPruneTables=%d depth=%d%s\n", nStepPrunes[step], stepHashDepth[step], buf);
    }
}
// Run the prune-table search for the current step. Single step (linkStep==
// -1): deepen maxDepth until a solution/limit is hit. Linked step: re-search
// from each solution of the linked step, tracking the best overall sequence
// length (or weight). Prints a summary and frees the prune tables.
void doPruneStep(CUBE *pC) { // search with prune tables
    char stepStr[16]; linkStep>=0 ? sprintf(stepStr, "%d=>%d", step, linkStep) : sprintf(stepStr, "%d", step);
    printlog("STEP %s PRUNE TABLE SEARCH%s timeLim=%dms/%s,%dms/step nMoves=%d/%d depth=%d-%d maxSol=%d\n",
        stepStr, (maxWeight ? " + WEIGHT" : ""), solTime[step], solTimeHard[step]?"hard":"soft", stepTime[step],
        amoves0[0], amoves1[0], stepStartDepth[step], stepMaxDepth[step], stepMaxSol[step]);
    initPrune();
    int nSol= stepMaxSol[step], solNum= -1;
    if (linkStep==-1) { // Single step
        solTimeStart= timems(); bestWeight= -1; nStepSol[step]= 0;
        for(maxDepth= stepStartDepth[step]; maxDepth<stepMaxDepth[step]; maxDepth++) {
            if (bestWeight>=0) printlog("bestWeight=%d trying %d moves ", bestWeight, maxDepth+1);
            else printlog("trying %d moves ", maxDepth+1);
            pruneTableMain(pC, 0); // run prune table search
            if (timeLimTest(nStepSol[step])) break; // solution time limit
            if (solutionFound || nStepSol[step]==stepMaxSol[step]) break; // search is finished OR solutions limit
        }
    }else{ // Linked step
        if (stepMaxSol[step]==1 || stepMaxSol[step]>nStepSol[linkStep]) nSol= stepMaxSol[step]= nStepSol[linkStep];
        else if (stepMaxSol[step]==-1) nSol= 1;
        nStepSol[step]= 0; // in linked step use one record for the best solution
        if (stepLinkLocal[step]) bestSolLen= -1;
        for(solNum= 0; solNum<nSol && !timeLimTest(0); solNum++) {
            if (nSol>1) linkSol= nStepSol[linkStep]/nSol*solNum;
            else if (stepMaxSol[step]==-1) linkSol= nStepSol[linkStep]-1;
            else linkSol= solNum;
            CUBE C1= *pC; C1.m0= C1.m1= C1.m2= -1;
            if (stepSol[linkStep][linkSol][0]==N_MAX_MOVES) continue; // no solution in linked step
            for(int i=1; i<=stepSol[linkStep][linkSol][0]; i++) // update cube to the linkStep/linkSol position
                moveCubeSelf(&C1, stepSol[linkStep][linkSol][i]);
            stepSol[step][nStepSol[step]][0]= N_MAX_MOVES; // means no solution
            int endDepth= (bestSolLen>=0 && !maxWeight) ? // adjust depth if searching better solution length
                bestSolLen-stepSol[linkStep][linkSol][0]-1 : stepMaxDepth[step];
            solTimeStart= timems(); bestWeight= -1;
            for(maxDepth= stepStartDepth[step]; maxDepth<endDepth && !timeLimTest(bestSolLen>=0); maxDepth++) {
                printlog("solution %d/%d: trying %d moves ", solNum+1, nSol, maxDepth+1);
                if (pruneTableMain(&C1, 0)) break; // run prune table search
            }
            // Update bestSolLen, bestSolWeight, bestStep & bestStepSol for the sequence of steps
            int newSolW= stepSolWeight[step][ nStepSol[step] ],
                // N_MAX_MOVES is the "no solution" sentinel (was wrongly compared to N_MAX_SOL)
                newSolL= stepSol[step][ nStepSol[step] ][0]<N_MAX_MOVES ? stepSol[step][ nStepSol[step] ][0] : -1;
            if (maxWeight) { // max weight mode
                if (bestSolLen==-1 || newSolW>bestSolWeight || (newSolW==bestSolWeight && newSolL<bestSolLen)) {
                    bestSolLen= newSolL; bestSolWeight= newSolW; bestStep= step; bestStepSol= nStepSol[step];
                    printlog("solution #%d=>%d: weight=%d, moves=%d\n", solNum, linkSol, newSolW, newSolL);
                }
            }else{ // minimum length mode
                if ((bestSolLen==-1 || newSolL<bestSolLen) && stepSol[step][nStepSol[step]][0]<N_MAX_MOVES) {
                    bestSolLen= newSolL; bestSolWeight= newSolW; bestStep= step; bestStepSol= nStepSol[step];
                    printlog("solution #%d=>%d: moves=%d\n", solNum, linkSol, stepSol[bestStep][bestStepSol][0]);
                }
            }
            if (stepSol[step][nStepSol[step]][0]!=N_MAX_MOVES) nStepSol[step]++; // increment if has a solution
        }
    }
    if (stepTimeInfo[step][0]) printlog(stepTimeInfo[step]); // show time limit info if happened
    // show step results
    char sMoves[256]= {0}, sWeight[256]= {0}, resText[256]= {0};
    int minM= N_MAX_MOVES, maxM= 0, minW= stepSolWeight[step][0], maxW= minW, cnt= nStepSol[step];
    for(int i=0; i<cnt; i++) {
        if (stepSol[step][i][0]<minM) minM= stepSol[step][i][0];
        if (stepSol[step][i][0]>maxM) maxM= stepSol[step][i][0];
        if (stepSolWeight[step][i]<minW) minW= stepSolWeight[step][i];
        if (stepSolWeight[step][i]>maxW) maxW= stepSolWeight[step][i];
    }
    if (cnt) {
        if (minM==maxM) sprintf(sMoves, ", %d moves", minM); else sprintf(sMoves, ", %d-%d moves", minM, maxM);
        if (maxWeight) sprintf(sWeight, " with weight %d-%d", minW, maxW); // fixed typo: was "width weight"
        sprintf(resText, "%d solution%s found%s%s", cnt, cnt>1?"s":"", sMoves, sWeight);
    }else
        sprintf(resText, "no solution");
    linkStep==-1 ? printlog("%s\n",resText) : printlog("linked solutions tested %d/%d, %s\n", solNum, nSol, resText);
    if (stepHashDepth[step] && nStepPrunes[step]) { // free memory for hash list table or prune table
        for(int i=0; i<nStepPrunes[step]; i++) free(pruneTable[i]);
        free(pruneTable); free(pruneCnt);
    }
}
// The Trick: try to add 1-2-3-4 moves to current moveList, keep solved state and try to increase weight
//
int bestWTmoves[4], bestWTpos[4], bestWTlen, bestWTweight; // weight trick data structures
// Recursively test inserting wtMoves[0..wtLen-1] into moveList at positions
// wtPos (pd[i] = min gap, dd[i] = max gap to the previous insertion). ind is
// the insertion being applied; *pC already has moveList replayed up to the
// previous insertion point. After the last insertion the tail of moveList is
// replayed and, if the cube is still solved and the moves-per-weight-gain
// ratio is within stepParams[step][4], the insertion becomes the new best.
void weightTrickCheck(CUBE *pC, int wtLen, int *wtMoves, int *wtPos, int ind, int *pd, int *dd) {
    CUBE C= *pC;
    for(int p= (ind>0 ? wtPos[ind-1] : 0); p<wtPos[ind]; p++) moveCubeSelf(&C, moveList[p]);
    moveCubeSelf(&C, wtMoves[ind]);
    if (ind==wtLen-1) { // last inserted move - replay the tail and evaluate
        for(int p=wtPos[ind]; p<nMoves; p++) moveCubeSelf(&C, moveList[p]);
        if (isSolved( &C )) {
            int w= cubeWeight( &C );
            #pragma omp critical
            if (w>bestWTweight && (float)wtLen/(w-bestWTweight) <= stepParams[step][4]) {
                for(int j=0; j<wtLen; j++) { bestWTpos[j]= wtPos[j]; bestWTmoves[j]= wtMoves[j]; }
                bestWTweight= w; bestWTlen= wtLen;
            }
        }
    }else // place the next insertion at every allowed position and recurse
        for(wtPos[ind+1]= wtPos[ind]+pd[ind]; wtPos[ind+1]<nMoves && wtPos[ind+1]<wtPos[ind]+dd[ind]; wtPos[ind+1]++)
            weightTrickCheck(&C, wtLen, wtMoves, wtPos, ind+1, pd, dd);
}
// The Trick driver: repeatedly try to insert 1-4 extra moves into moveList
// so the cube stays solved but the weight grows. stepParams[step][3] selects
// which insertion lengths are tried (bit flags 1/2/4/8), [4] limits the
// moves-per-weight-gain ratio, [5..15] are the max distances between the
// inserted moves. Loops until no insertion helps, then replays the final
// moveList from cube0 into *pC.
void doWeightTrick(CUBE *pC) {
    int par= (int)stepParams[step][3]; // insertion-length flags
    uint8_t *am= stepMoves[step][0][0]; // allowed moves (am[0] = count, moves at 1..am[0])
    bestWTweight= cubeWeight(pC);
    while(1) {
        bestWTlen= -1;
        if (par & 1) { // one move flag
            #pragma omp parallel for schedule(dynamic,1) collapse(2)
            for(int m= 1; m<=am[0]; m++) for(int p= 0; p<nMoves; p++) {
                // was { m }: passed the loop index (not the move am[m]), unlike every other branch
                int wtMoves[1]= { am[m] }, wtPos[1]= { p };
                weightTrickCheck(&cube0, 1, wtMoves, wtPos, 0, NULL, NULL);
            }
        }
        if (par & 2) { // two moves flag
            int dd[1]= { stepParams[step][5] }; // ddX - max distance between moves
            #pragma omp parallel for schedule(dynamic,1) collapse(2)
            for(int m0i=1; m0i<=am[0]; m0i++) for(int m1i=1; m1i<=am[0]; m1i++) { // m1i started at 0, reading the count am[0] as a move
                int m0= am[m0i], m1= am[m1i]
                    , p1d= ( m1==m0 || m1==moveSpecial[m0][0] || m1==moveSpecial[m0][2] )
                    , pd[1]= { p1d }, wtMoves[2]= { m0, m1 }, wtPos[2];
                for(wtPos[0]= 0; wtPos[0]<nMoves; wtPos[0]++)
                    weightTrickCheck(&cube0, 2, wtMoves, wtPos, 0, pd, dd);
            }
        }
        if (par & 4) { // three moves flag
            int dd[2]= { stepParams[step][6], stepParams[step][7] }; // ddX - max distance between moves
            #pragma omp parallel for schedule(dynamic,1) collapse(2)
            for(int m0i= 1; m0i<=am[0]; m0i++) for(int m1i=1; m1i<=am[0]; m1i++) {
                int m0= am[m0i], m1= am[m1i], m2= moveSpecial[m0][0]
                    , p1d= ( m1==m0 || m1==moveSpecial[m0][0] || m1==moveSpecial[m0][2] )
                    , p2d= ( m2==m1 || m2==moveSpecial[m1][0] || m2==moveSpecial[m1][2] )
                    , pd[2]= { p1d, p2d }, wtMoves[3]= { m0, m1, m2 }, wtPos[3];
                for(wtPos[0]= 0; wtPos[0]<nMoves; wtPos[0]++)
                    weightTrickCheck(&cube0, 3, wtMoves, wtPos, 0, pd, dd);
            }
            dd[0]= stepParams[step][8]; dd[1]= stepParams[step][9];
            #pragma omp parallel for schedule(dynamic,1) collapse(2)
            for(int m0i= 1; m0i<=am[0]; m0i++) for(int m2i=1; m2i<=am[0]; m2i++) {
                int m0= am[m0i], m1= moveSpecial[m0][0], m2= am[m2i], p1d= 1
                    , p2d= ( m2==m1 || m2==moveSpecial[m1][0] || m2==moveSpecial[m1][2] )
                    , pd[2]= { p1d, p2d }, wtMoves[3]= { m0, m1, m2 }, wtPos[3];
                for(wtPos[0]= 0; wtPos[0]<nMoves; wtPos[0]++)
                    weightTrickCheck(&cube0, 3, wtMoves, wtPos, 0, pd, dd);
            }
        }
        if (par & 8) { // four moves flag
            int dd[3]= { stepParams[step][10], stepParams[step][11], stepParams[step][12] };
            #pragma omp parallel for schedule(dynamic,1) collapse(2)
            for(int m0i= 1; m0i<=am[0]; m0i++) for(int m1i=1; m1i<=am[0]; m1i++) {
                int m0= am[m0i], m1= am[m1i], m2= moveSpecial[m1][0], m3= moveSpecial[m0][0]
                    , p1d= ( m1==m0 || m1==moveSpecial[m0][0] || m1==moveSpecial[m0][2] ), p2d= 1
                    , p3d= ( m3==m2 || m3==moveSpecial[m2][0] || m3==moveSpecial[m2][2] )
                    , pd[3]= { p1d, p2d, p3d }, wtMoves[4]= { m0, m1, m2, m3 }, wtPos[4];
                for(wtPos[0]= 0; wtPos[0]<nMoves; wtPos[0]++)
                    weightTrickCheck(&cube0, 4, wtMoves, wtPos, 0, pd, dd);
            }
            dd[0]= stepParams[step][13]; dd[1]= stepParams[step][14]; dd[2]= stepParams[step][15];
            #pragma omp parallel for schedule(dynamic,1) collapse(2)
            for(int m0i= 1; m0i<=am[0]; m0i++) for(int m1i=1; m1i<=am[0]; m1i++) {
                int m0= am[m0i], m1= am[m1i], m2= moveSpecial[m0][0], m3= moveSpecial[m1][0]
                    , p1d= ( m1==m0 || m1==moveSpecial[m0][0] || m1==moveSpecial[m0][2] )
                    , p2d= ( m2==m1 || m2==moveSpecial[m1][0] || m2==moveSpecial[m1][2] )
                    , p3d= ( m3==m2 || m3==moveSpecial[m2][0] || m3==moveSpecial[m2][2] )
                    , pd[3]= { p1d, p2d, p3d }, wtMoves[4]= { m0, m1, m2, m3 }, wtPos[4];
                for(wtPos[0]= 0; wtPos[0]<nMoves; wtPos[0]++)
                    weightTrickCheck(&cube0, 4, wtMoves, wtPos, 0, pd, dd);
            }
        }
        if (bestWTlen>=0) { // best insertion found => splice it into moveList
            int moveListNew[N_MAX_MOVES], nMovesPrev= nMoves; nMoves= 0;
            for(int i=0; i<nMovesPrev; i++) {
                for(int j=0; j<bestWTlen; j++) if (i==bestWTpos[j]) moveListNew[nMoves++]= bestWTmoves[j];
                moveListNew[nMoves++]= moveList[i];
            }
            for(int i=0; i<nMoves; i++) moveList[i]=moveListNew[i];
        }
        else break; // no improving insertion => done
    }
    *pC= cube0; for(int i=0; i<nMoves; i++) moveCubeSelf(pC,moveList[i]); // replay final moveList
}
// Weight iterations
// msTable - move-sets table block size is msTableBlock, first byte is the number of moves
// msTransTable - transformation table for the regular move-sets
// msLastChance - like msTable but build from templates and used without transformations
// msHash - hash table, used to avoid same cube position with different move-sets, collisions possible
//
uint8_t *msTable, *msTransTable, *msLastChance, *msHash; // move-set tables (roles described in the comment above)
int msTableLen, msTableBlock, msLastChanceLen, msLastChanceBlock // tables lengths and block sizes (first byte of a block = number of moves)
    , nStack, stackSize, stack[N_MAX_SOL][N_MAX_MOVES+1]; // stack of solutions for weight step
float moveWeight, solMoveWeight[N_MAX_SOL], stackMoveWeight[N_MAX_SOL]; // weights current, solution's and stack's
// 65599 rolling hash over the whole sticker string, reduced to the move-set
// hash table range with MS_HASH_MASK.
static inline uint32_t msHashCube(CUBE *pC) {
    uint32_t h= 0;
    char *s= pC->c;
    for(int i=0; i<N_BLOCKS; i++) h= s[i] + h*65599u;
    return h & MS_HASH_MASK;
}
// Register a solved-state move list as a move-set (or last-chance move-set).
// msHash de-duplicates cube positions; when a position is added, positions
// reachable from it by 1-3 further 'soft' moves are marked too, so near-
// duplicates found at greater depths are skipped (hash collisions accepted).
static inline void buildMsAddMoveList(CUBE *pC, int len, uint8_t *mvList, int useLastChance) {
    CUBE C1, C2, C3;
    uint32_t hash= msHashCube(pC);
    #pragma omp critical
    if (!msHash[hash]) {
        msHash[hash]= 1;
        int smLen= stepMoves[step][0][0][0]; // use 'soft' moves (move-set index 0)
        if (maxDepth<stepMaxDepth[step]-1) for(int i1=1; i1<=smLen; i1++) {
            msHash[ msHashCube( moveCube(pC, &C1, stepMoves[step][0][0][i1]) ) ]= 1;
            if (maxDepth<stepMaxDepth[step]-2) for(int i2=1; i2<=smLen; i2++) {
                msHash[ msHashCube( moveCube(&C1, &C2, stepMoves[step][0][0][i2]) ) ]= 1;
                if (maxDepth<stepMaxDepth[step]-3) for(int i3=1; i3<=smLen; i3++)
                    msHash[ msHashCube( moveCube(&C2, &C3, stepMoves[step][0][0][i3]) ) ]= 1; // was [i2]: third level re-applied the second move
            }
        }
        if (useLastChance) {
            if (msLastChanceLen==MS_MAX_TABLE_SIZE) error("Increase MS_MAX_TABLE_SIZE");
            // size_t offsets: int*int overflowed for large tables (len up to 0x4000000)
            msLastChance[ (size_t)msLastChanceLen*msLastChanceBlock ]= len; // first byte = number of moves
            memcpy( msLastChance + (size_t)msLastChanceLen*msLastChanceBlock + 1, mvList, len);
            msLastChanceLen++;
        }else{
            if (msTableLen==MS_MAX_TABLE_SIZE) error("Increase MS_MAX_TABLE_SIZE");
            msTable[ (size_t)msTableLen*msTableBlock ]= len; // first byte = number of moves
            memcpy( msTable + (size_t)msTableLen*msTableBlock + 1, mvList, len);
            msTableLen++;
        }
    }
}
// Decode one template move entry: codes 252..255 refer back to an earlier
// move in mvList (at position ms[1]); anything else is a literal move index.
static inline int processTmpMove(uint8_t *ms, uint8_t *mvList) {
    uint8_t code= *ms;
    if (code==255) return mvList[ms[1]];                   // '=' same move as the earlier position
    if (code==254) return moveSpecial[ mvList[ms[1]] ][0]; // '^' reverse of the earlier move
    if (code==253) return moveSpecial[ mvList[ms[1]] ][4]; // '$' opposite of the earlier move
    if (code==252) return moveSpecial[ mvList[ms[1]] ][5]; // '#' reverse opposite of the earlier move
    return code;                                           // literal move
}
// build move-set template iteration
// Expand one level of a template move-set: mv[depth] lists the candidates
// for this position (entries with codes 252..255 are resolved against
// earlier moves via processTmpMove). Recurse until maxDepth; a final
// position that is solved for the step is registered as a move-set.
void buildTmpMsNext(CUBE *pC, int depth, uint8_t *mvList, uint8_t mv[N_MAX_MS_MOVES][N_MOVES+1], int useLastChance) {
    CUBE C1;
    for(int i=1; i<=mv[depth][0]; i++) { // mv[depth][0] = number of candidates
        int m= processTmpMove( mv[depth]+i, mvList );
        if (m!=pC->m0 && m!=pC->m1 && m!=pC->m2) { // the move is allowed after the previous one
            moveCube(pC, &C1, mvList[depth]= m);
            if (depth<maxDepth) buildTmpMsNext(&C1, depth+1, mvList, mv, useLastChance);
            else if (isSolved(&C1)) buildMsAddMoveList(&C1, depth+1, mvList, useLastChance); // last move
        }
    }
}
// Brute-force move-set expansion: below maxDepth try every move (subject to
// the m0..m2 restrictions); at the last level use the step's 'hard' move set
// (index 1) and register positions that are solved for the step but whose
// weight differs from the solved cube's weight (bestWeight).
void buildMsNext(CUBE *pC, int depth, uint8_t *mvList) { // build move-set iteration
    CUBE C1;
    if (depth<maxDepth)
        for(int m=0; m<N_MOVES; m++) { // use all moves
            if (m!=pC->m0 && m!=pC->m1 && m!=pC->m2) {
                moveCube(pC, &C1, mvList[depth]= m);
                buildMsNext(&C1, depth+1, mvList);
            }
        }
    else // last move
        for(int i=1; i<=stepMoves[step][0][1][0]; i++) { // use 'hard' moves (index 1)
            int m= stepMoves[step][0][1][i];
            if (m!=pC->m0 && m!=pC->m1 && m!=pC->m2) {
                moveCube(pC, &C1, mvList[depth]= m);
                if (isSolved(&C1) && cubeWeight(&C1)!=bestWeight) buildMsAddMoveList(&C1, depth+1, mvList, 0);
            }
        }
}
void buildMs(char *fn) { // build move-set, use file name to write it out
printlog("building move-sets "); logLastTimer= 0;
msTableLen= msLastChanceLen= msLastChanceBlock= 0;
msTableBlock= stepMaxDepth[step]+1;
for(int msi= 1; msi<1+stepParams[step][2]; msi++) // find max length for template move-sets
if (stepMoveSetsLen[step][msi]>=msTableBlock) msTableBlock= stepMoveSetsLen[step][msi]+1;
for(int msi= 1+stepParams[step][2]; msi<nStepMoveSets[step]; msi++) // find max length for last chance move-sets
if (stepMoveSetsLen[step][msi]>=msLastChanceBlock) msLastChanceBlock= stepMoveSetsLen[step][msi]+1;
msHash= calloc( MS_HASH_SIZE, sizeof(uint8_t) );
msTable= calloc( MS_MAX_TABLE_SIZE, msTableBlock );
msLastChance= calloc( MS_MAX_TABLE_SIZE, msLastChanceBlock );
if (!msHash || !msTable || !msLastChance) error("buildMs. Not enough memory\n");
CUBE zero= { m0:-1, m1:-1, m2:-1 }; memcpy(zero.c, zeroCube, N_BLOCKS); // start with zero cube
bestWeight= cubeWeight(&zero); // should be max_weight
int startDepth= stepStartDepth[step]-1; if (startDepth<3) startDepth= 3; // move-sets makes sense from 3 moves
for(maxDepth=startDepth; maxDepth<stepMaxDepth[step]; maxDepth++) {
int percentCnt= 0;
#pragma omp parallel for schedule(dynamic,1) collapse(2) // expand two first moves
for(int m1=0; m1<N_MOVES; m1++) for(int m2=0; m2<N_MOVES; m2++) {
printprogress("depth=%d len=%d %.1lf%%", maxDepth+1, msTableLen, 100.0* ++percentCnt/N_MOVES/N_MOVES );
uint8_t mvList[N_MAX_MS_MOVES]; // move list array for each thread
CUBE C1; moveCube(&zero, &C1, mvList[0]= m1); // make move 1
if (m2!=C1.m0 && m2!=C1.m1 && m2!=C1.m2) { // move 2 could be done after move 1
CUBE C2; moveCube(&C1, &C2, mvList[1]= m2); // make move 2
buildMsNext(&C2, 2, mvList);
}
}
}
memset(msHash, 0, MS_HASH_SIZE);
for(int msi= 1; msi<1+stepParams[step][2]; msi++) if ((maxDepth= stepMoveSetsLen[step][msi]-1)>=2) { // 3+ moves
int percentCnt= 0, nMoves1= stepMoves[step][msi][0][0], nMoves2= stepMoves[step][msi][1][0];
#pragma omp parallel for schedule(dynamic,1) collapse(2) // expand two first moves from template
for(int i1=1; i1<=nMoves1; i1++) for(int i2=1; i2<=nMoves2; i2++) {
printprogress("set=%d len=%d %.1lf%%", msi, msLastChanceLen, 100.0* ++percentCnt/nMoves1/nMoves2 );
int m1= stepMoves[step][msi][0][i1], m2= stepMoves[step][msi][1][i2];
uint8_t mvList[N_MAX_MOVES]; // move list array for each thread
CUBE C1; moveCube(&zero, &C1, mvList[0]= m1); // make move 1
if (m2!=C1.m0 && m2!=C1.m1 && m2!=C1.m2) { // move 2 could be done after move 1
CUBE C2; moveCube(&C1, &C2, mvList[1]= m2); // make move 2
buildTmpMsNext(&C2, 2, mvList, stepMoves[step][msi], 0);
}
}
}
memset(msHash, 0, MS_HASH_SIZE);
for(int msi= 1+stepParams[step][2]; msi<nStepMoveSets[step]; msi++)
if ((maxDepth= stepMoveSetsLen[step][msi]-1)>=2) { // from 3 moves
int percentCnt= 0, nMoves1= stepMoves[step][msi][0][0], nMoves2= stepMoves[step][msi][1][0];
#pragma omp parallel for schedule(dynamic,1) collapse(2) // expand two first moves from template
for(int i1=1; i1<=nMoves1; i1++) for(int i2=1; i2<=nMoves2; i2++) {
printprogress("set=%d len=%d %.1lf%%", msi, msLastChanceLen, 100.0* ++percentCnt/nMoves1/nMoves2 );
int m1= stepMoves[step][msi][0][i1], m2= stepMoves[step][msi][1][i2];
uint8_t mvList[N_MAX_MOVES]; // move list array for each thread
CUBE C1; moveCube(&zero, &C1, mvList[0]= m1); // make move 1
if (m2!=C1.m0 && m2!=C1.m1 && m2!=C1.m2) { // move 2 could be done after move 1
CUBE C2; moveCube(&C1, &C2, mvList[1]= m2); // make move 2
buildTmpMsNext(&C2, 2, mvList, stepMoves[step][msi], 1);
}
}
}
FILE *f= fopen(fn, "w"); // write out move-sets
fwrite(&msTableLen, sizeof(int), 1, f); fwrite(&msTableBlock, sizeof(int), 1, f);
fwrite(&msLastChanceLen, sizeof(int), 1, f); fwrite(&msLastChanceBlock, sizeof(int), 1, f);
fwrite(msTable, msTableBlock, msTableLen, f); fwrite(msLastChance, msLastChanceBlock, msLastChanceLen, f);
fclose(f);
free(msHash);
stepTimeStart= timems(); // restart step timer
}
// Initialize move-set storage for the current step: load msTable/msLastChance
// from the cache file (data/<alg>.<md5[0]>) or rebuild via buildMs(), then
// compute msTransTable - for each move-set, the sticker permutation produced
// by applying it to the identity cube. cw is the start weight shown in the
// log for single (non-linked) steps.
void initMs(int cw) { // initialize move-sets and template move-sets storage
    char fn[256]; sprintf(fn, "data/%s.%s", ALG_NAME, stepMd5[step][0]);
    int fileFail= 1; msTable= msLastChance= NULL;
    FILE *f= fopen(fn, "r");
    if (f) {
        int cnt= 0; // cache header: four ints (table lengths and block sizes)
        cnt+= fread_unlocked(&msTableLen, sizeof(int), 1, f);
        cnt+= fread_unlocked(&msTableBlock, sizeof(int), 1, f);
        cnt+= fread_unlocked(&msLastChanceLen, sizeof(int), 1, f);
        cnt+= fread_unlocked(&msLastChanceBlock, sizeof(int), 1, f);
        if (cnt==4) {
            msTable= calloc( msTableLen, msTableBlock );
            msLastChance= calloc( msLastChanceLen, msLastChanceBlock );
            if (!msTable || !msLastChance) error("initMs. Not enough memory");
            if ((fread_unlocked(msTable, msTableBlock, msTableLen, f)==msTableLen) &&
                (fread_unlocked(msLastChance, msLastChanceBlock, msLastChanceLen, f)==msLastChanceLen)) fileFail= 0;
            else { free(msTable); free(msLastChance); }
        }
        fclose(f);
    }
    if (fileFail) buildMs(fn);
    printlog("moveSets=%d building transformations...", msTableLen);
    msTransTable= calloc( msTableLen, N_BLOCKS );
    if (!msTransTable) error("initMs. Not enough memory");
    CUBE C0= { m0:-1, m1:-1, m2:-1 }; for(int j=0; j<N_BLOCKS; j++) C0.c[j]= j; // identity cube: sticker i holds value i
    #pragma omp parallel for schedule(dynamic,1024) collapse(1)
    for(int i=0; i<msTableLen; i++) {
        CUBE C= C0;
        for(int j=1; j<=msTable[ i*msTableBlock ]; j++) moveCubeSelf(&C, msTable[ i*msTableBlock + j ]);
        for(int j=0; j<N_BLOCKS; j++) msTransTable[ i*N_BLOCKS + j ]= C.c[ j ];
    }
    char winfo[1024]= {0}; if (linkStep==-1) sprintf(winfo, "startWeight=%d ", cw);
    printlog("%stargetWeight=%d moveSets=%d lastChance=%d\n", winfo, maxWeight, msTableLen, msLastChanceLen);
}
// Insert a candidate solution (move-set ms, moveWeight mw) into the per-step
// solution list stepSol[step]/solMoveWeight.
//   pos==0  : keep the new solution at the LAST list position (the "current best"
//             slot consumed by doWeightStep); shift the previous last entry.
//   pos!=0  : replace some worse entry, excluding the last position.
// Callers invoke this from checkMoveSet inside an omp critical section; the
// static iterator below has no synchronization of its own.
static inline void addWeightSol(int pos, uint8_t *ms, float mw) { // pos=0 to last position, pos=-1 replace worst
static int solPosIterator= -1; // global sol position iterator to vary the position
int solPos= -1; // local position pointer
if (nStepSol[step]==stepMaxSol[step]) { // no space in sol list
if (pos) { // find worst position excluding last one with static iterator to vary the position
// NOTE(review): this picks the FIRST entry (from the rotating iterator) whose
// weight is >= mw, not necessarily the maximum — appears intentional to vary
// which entry gets replaced, but confirm against design intent.
for(int i=0; i<nStepSol[step]-1; i++) {
if (++solPosIterator >= nStepSol[step]-1) solPosIterator= 0;
if (solMoveWeight[solPosIterator] >= mw) { solPos= solPosIterator; break; }
}
}else{ // find worst position with maximum moveWeight
float maxW= solMoveWeight[solPos= 0];
for(int i=1; i<nStepSol[step]; i++) if (solMoveWeight[i]>maxW) maxW= solMoveWeight[solPos= i];
// Compact the list over the evicted entry so the last slot becomes free.
for(int i=solPos+1; i<nStepSol[step]; i++) { // move solutions list up from solPos till end
solMoveWeight[i-1]= solMoveWeight[i];
// Entry byte 0 is the move count; copy count+1 bytes.
memcpy(stepSol[step][i-1], stepSol[step][i], stepSol[step][i][0]+1);
}
solPos= nStepSol[step]-1;
}
}else{ // have space for new solution
solPos= nStepSol[step]++; // increment solutions size and set to last position
if (pos) { // keep last element and set pointer to last-1
solMoveWeight[solPos]= solMoveWeight[solPos-1];
memcpy(stepSol[step][solPos], stepSol[step][solPos-1], stepSol[step][solPos-1][0]+1);
solPos--;
}
}
// solPos can remain -1 (pos!=0 and nothing worse found) => drop the candidate.
if (solPos>=0) { solMoveWeight[solPos]= mw; memcpy(stepSol[step][solPos], ms, ms[0]+1); } // add new solution
}
// Evaluate one candidate move-set: if applying it raised the cube weight above
// the iteration's bestWeight, compute its moveWeight = length / weight-gain
// (lower is better) and record it via addWeightSol. All shared-state updates
// happen inside the omp critical section since this is called from parallel
// loops in checkMoveSets/checkLastChance.
static inline void checkMoveSet(CUBE *pC, uint8_t *ms) { // check if the move-set brings to better weight
int w= cubeWeight(pC);
if (w>bestWeight)
#pragma omp critical
{
// ms[0] is the move count of the move-set.
float mw= (float)*ms/(w-bestWeight);
if (mw <= moveWeight) { // better moveWeight => add to single position in stepSol
moveWeight= mw; newBestWeight= w;
addWeightSol(0, ms, mw);
if (w==maxWeight) solutionFound= 1; // target achieved
}
// Not the best, but still inside the step's moveWeight limit => keep as alternative.
else if (mw <= stepParams[step][0]) addWeightSol(-1, ms, mw);
}
}
// Scan every precomputed move-set (shorter than maxLen) against cube *pC,
// applying each as a block-index permutation from msTransTable instead of
// replaying its moves. Resets the per-iteration globals (bestWeight,
// moveWeight, nStepSol, solutionFound) before the parallel scan.
static inline void checkMoveSets(CUBE *pC, int maxLen) { // check move-sets with msTransTable
int w= cubeWeight(pC);
bestWeight= newBestWeight= w; moveWeight= 1e9; nStepSol[step]= 0; solutionFound= 0;
#pragma omp parallel for schedule(dynamic,1024) collapse(1)
// solutionFound is a cooperative early-exit flag set under the critical
// section in checkMoveSet; remaining iterations become no-ops once it is set.
for(int msi=0; msi<msTableLen; msi++) if (!solutionFound && msTable[ msi*msTableBlock ]<maxLen) {
CUBE C= *pC;
for(int i=0; i<N_BLOCKS; i++) C.c[i]= pC->c[ msTransTable[ msi*N_BLOCKS + i] ]; // apply transformation
checkMoveSet(&C, msTable + msi*msTableBlock);
}
}
// Fallback scan over the templated ("last chance") move-sets. Unlike
// checkMoveSets these have no precomputed permutation, so each set's moves
// are replayed one by one on a copy of *pC. Same global reset / early-exit
// protocol as checkMoveSets.
static inline void checkLastChance(CUBE *pC, int maxLen) { // check templated move-sets aka last chance
int w= cubeWeight(pC);
bestWeight= newBestWeight= w; moveWeight= 1e9; nStepSol[step]= 0; solutionFound= 0;
#pragma omp parallel for schedule(dynamic,1024) collapse(1)
for(int msi=0; msi<msLastChanceLen; msi++) if (!solutionFound) {
// Entry layout matches msTable: byte 0 = move count, bytes 1..len = moves.
int len= msLastChance[ msi*msLastChanceBlock ];
if (len<maxLen) { // limit with maximum achieved length
CUBE C= *pC;
for(int i=1; i<=len; i++) moveCubeSelf(&C, msLastChance[ msi*msLastChanceBlock + i ]);
checkMoveSet(&C, msLastChance + msi*msLastChanceBlock);
}
}
}
// Push solution stepSol[step][sol] onto the search stack with cumulative
// moveWeight mw. When the stack is full, an existing entry with a larger
// moveWeight is overwritten instead of growing the stack.
static inline void addSolToStack(int sol, float mw) { // add a solution with index sol and provided moveWeight
int stackPos= nStack;
if (nStack==stepMaxSol[step]) { // no space in stack
// NOTE(review): this takes the FIRST entry with a larger moveWeight, not the
// maximum — "worst position" in the comment below is approximate; confirm intent.
for(int i=0; i<nStack; i++)
if (mw < stackMoveWeight[i]) { stackPos= i; break; } // set stackPos to worst position
}
else nStack++; // have space => increment stack size counter
// stackPos==old nStack with a full stack means no worse entry => drop silently.
if (stackPos<nStack) { // add solution to the stack if possible
stackMoveWeight[stackPos]= mw;
for(int j=0; j<=stepSol[step][sol][0]; j++) stack[stackPos][j]= stepSol[step][sol][j];
}
}
// Weight-maximization search step (best-first over a bounded stack of partial
// solutions). For each solution inherited from the linked step (or the single
// empty one), it repeatedly picks the stack entry with the smallest cumulative
// moveWeight, extends it with the best move-set found by checkMoveSets /
// checkLastChance, and records the shortest move sequence that reaches
// stepMaxWeights[step]. Results are left in stepSol[step][0] and the GLOBALs
// bestStep/bestStepSol/bestSolLen; *pC is reset to the starting cube.
void doWeightStep(CUBE *pC) { // perform weight step for the cube, use GLOBAL step variable
char stepStr[16]; sprintf(stepStr, linkStep>=0?"%d=>%d":"%d", step, linkStep);
printlog("STEP %s WEIGHT_ITERATIONS timeLim=%dms/%s,%dms/step\n",
stepStr, solTime[step], solTimeHard[step]?"hard":"soft", stepTime[step]);
int bestLen= -1, bestMoves[N_MAX_MOVES], beforeMoves[N_MAX_MOVES], nBeforeMoves= nMoves
, cw= cubeWeight(pC), maxReached= cw==maxWeight;
// Snapshot the moves made before this step; each outer iteration replays them.
for(int i=0; i<nMoves; i++) beforeMoves[i]= moveList[i];
initMs(cw);
// solS, solE - start and end solution indexes for linked step, solI - increment +1 or -1, wsD - solutions limit
int solS= 0, solE= linkStep>=0 ? nStepSol[linkStep]-1 : 0, solI= 1, wsD= stepParams[step][1];
// wsD != 0 => walk the last wsD linked solutions in reverse order.
if (wsD) { solS= nStepSol[linkStep]-1; solE= nStepSol[linkStep]>wsD ? nStepSol[linkStep]-wsD : 0; solI= -1; }
int sol= solS;
while(!timeLimTest(0)) { // sol is linked solutions from solS to solE iterate while inside time limit
nStack= 1; stack[0][0]= stackMoveWeight[0]= nStepSol[step]= 0; // first empty solution in the stack to start
CUBE C1= cube0; C1.m0= C1.m1= C1.m2= -1; nMoves= 0; // start with the very start cube
for(int i=0; i<nBeforeMoves; i++) moveCubeSelf(&C1, moveList[nMoves++]= beforeMoves[i]); // to step position
if (linkStep>=0) for(int i=1; i<=stepSol[linkStep][sol][0]; i++) // to linkStep/sol position
moveCubeSelf(&C1, moveList[nMoves++]= stepSol[linkStep][sol][i]);
cw= cubeWeight(&C1); doWeightTrick(&C1); // do the trick for each solution, change moveList
int nMoves1= nMoves;
solTimeStart= timems();
for(int cnt=0; nStack && !timeLimTest(maxReached); cnt++) { // stack is not empty & time is not over
// Best-first: select the stack entry with minimal cumulative moveWeight.
int item= nStack-1; float mw= stackMoveWeight[item]; // find smallest, start from stack end
for(int i=nStack-2; i>=0; i--) if (stackMoveWeight[i]<mw) { mw= stackMoveWeight[i]; item= i; }
CUBE cube=C1; nMoves= nMoves1; // update cube to stack position
for(int i=1; i<=stack[item][0]; i++) moveCubeSelf(&cube, moveList[nMoves++]= stack[item][i]);
for(int i= item; i<nStack-1; i++) { // remove item from the stack
stackMoveWeight[i]= stackMoveWeight[i+1];
for(int j=0; j<=stack[i][0]; j++) stack[i][j]= stack[i+1][j];
}
nStack--; // remove position from stack
int lastChance= 0, iter= 0; solutionFound= cubeWeight(&cube)==stepMaxWeights[step];
// Greedy inner loop: keep applying the best found move-set until the target
// weight is hit or both scans come up empty.
while(!solutionFound) {
printlog("sol %d*%d/%d*%d, w=%d-%d-%d: ", sol, cnt, nStack, iter, cw, bestWTweight, cubeWeight(&cube));
int maxLen= maxReached ? bestLen-nMoves : N_MAX_MOVES; // max is reached => limit search
lastChance ? checkLastChance(&cube, maxLen) : checkMoveSets(&cube, maxLen);
if (nStepSol[step]) { // solutions found
for(int sol=0; sol<nStepSol[step]-1; sol++) // add inside limit solutions to the stack
if (solMoveWeight[sol] <= stepParams[step][0]) addSolToStack(sol, mw+solMoveWeight[sol]);
for(int i=1; i<=stepSol[step][ nStepSol[step]-1 ][0]; i++) // move cube to the iteration solution
moveCubeSelf(&cube, moveList[nMoves++]= stepSol[step][ nStepSol[step]-1 ][i]);
iter++;
}
else if (!lastChance) lastChance=1; else break; // no solutions => use last chance or break
}
if ( (solutionFound || (!maxReached && !solutionFound)) // max is reached once => keep the status
&& (bestLen==-1 || nMoves<bestLen || (!maxReached && solutionFound)) ) {
if (!maxReached && solutionFound) maxReached= 1;
bestLen=nMoves; for(int i=0; i<bestLen; i++) bestMoves[i]= moveList[i];
printlog("sol %d*%d/%d*%d: w=%d-%d-%d moves=%d%s\n",
sol, cnt, nStack, iter, cw, bestWTweight, cubeWeight(&cube), nMoves, maxReached?"(+)":"(-)");
}
}
if (sol==solE) break; // solS=>solE done, or increment
sol+= solI;
}
// Publish the single best solution as stepSol[step][0] and reset the cube.
bestStep= step; nStepSol[step]= 1; bestStepSol= 0; bestSolLen= stepSol[step][0][0]= bestLen;
for(int i=0; i<bestLen; i++) stepSol[step][0][i+1]= bestMoves[i];
*pC= cube0; nMoves= 0;
printlog("weight step done, target %s in %d moves\n", maxReached?"achieved!":"missed:(", bestLen);
free(msTable); free(msLastChance);
}
// Entry point. Modes:
//   argv == "rand..."          => generate and print a random scrambled cube, exit.
//   argv == <cube color string> => validate it, run all solver steps, optimize
//                                  the move list, and write it to ".solution".
int main(int argc,char *argv[]) {
setbuf(stdout, NULL); // no buffering to pipe with python subprocess
remove(".solution"); remove(".error"); // remove solution and error files
char s[1024]= {0}; // concatenate command line arguments to s
for(int i=1; i<argc; i++) if (strlen(s)+strlen(argv[i])<1024) strcat(s, argv[i]);
// Generate random cube
CUBE cube= { m0:-1, m1:-1, m2:-1 };
if (!memcmp(s,"rand",4)) {
srand( timems() ); // initialize random generator
memcpy(cube.c, zeroCube, strlen(zeroCube)); // start with zero cube
for(int i=0; i<100+rand()%1000; i++) {
turnCube(&cube, &cube0, rand()%N_TURNS); memcpy(cube.c,cube0.c,N_BLOCKS);
}
for(int i=0; i<10000+rand()%100000; i++) moveCubeSelf(&cube, rand()%N_MOVES);
if (!strcmp(ALG_NAME,"cube333gear")) { // 333g only => make all edges fit cube position
while(!cube.c[10]) moveCubeSelf(&cube, 0);
while(!cube.c[14]) moveCubeSelf(&cube, 2);
while(!cube.c[5]) moveCubeSelf(&cube, 4);
}
printCubeLine(&cube);
return 0;
}
// Check cube string length
if (strlen(s)!=strlen(zeroCube)) error("Incorrect puzzle. Wrong number of blocks");
// Check corners & Build zero cube
// Reconstructs the solved-state color string (zeroCube) by matching each
// input corner against corner 0, validating corner consistency on the way.
if (nCorners) {
int nc= corLen, i, j, jc, x, y, z, k, hit[8]={0}, cnt=1;
for(i=0; i<strlen(zeroCube); i++) zeroCube[i]= '?';
for(x=0,k=0; x<nFaces; x++) for(y=0; y<faceLen[x]; y++,k++) for(z=0; z<nc; z++)
if (corder[0][z]==x) zeroCube[k]= s[corners[0][z]]; // set face colors for corner 0
for(i=1; i<nCorners; i++) { // iterate corners
char c0= zeroCube[ corners[i][0] ], c1= zeroCube[ corners[i][nc-1] ], c2= zeroCube[ corners[i][1] ];
for(j=1; j<nCorners; j++) if (!hit[j]) for(jc=0; jc<nc; jc++) { // corner J - try to fit with corner I
char s0= s[ corners[j][(jc+0)%nc] ], s1= s[ corners[j][(jc+1)%nc] ], cc= s[ corners[j][(jc+2)%nc] ];
if ( c1==s0 && c0==s1 || c0==s0 && c1==s1 ) { // corners I and J have two same items
if (c2!='?' && c2!=cc) error("Incorrect puzzle. Wrong corners");
for(x=0,k=0; x<nFaces; x++) for(y=0; y<faceLen[x]; y++,k++) if (corder[i][1]==x) zeroCube[k]= cc;
hit[j]= 1; cnt++; jc=nc; j=nCorners; // the corner J fit the place I
}
}
}
if (cnt!=nCorners) error("Incorrect puzzle. Wrong corners");
// NOTE(review): the comma clause `k++,z+=faceLen[k]` adds the length of the
// *incremented* k, so z skips faceLen[0] — verify this offset convention
// against the faceLen/revColInd layout (defined outside this view).
for(i=0; i<nRevCol; i++) for(j=0; j<2; j++)
for(k=0,z=0; k<nFaces; k++,z+=faceLen[k]) if (revColInd[i][j]==k) { revColInd[i][j]= z; break; }
// Build the reverse-color and normalized-color lookup tables.
for(i=0; i<256; i++) revColor[i]= (char)i;
for(i=0; i<nRevCol; i++) revColor[(int)zeroCube[ revColInd[i][0]] ]= zeroCube[ revColInd[i][1] ];
for(x=0,k=0; x<nFaces; x++) for(y=0; y<faceLen[x]; y++,k++) normColor[zeroCube[k]]= x;
printf("zero %.*s\n", (int)strlen(zeroCube), zeroCube);
}
// Cube to solve, save it for final check with all the moves
memcpy(cube.c, s, N_BLOCKS); cube0= cube;
// Algo steps
int prevStepEmpty= 0;
for(step=0; step<N_STEPS; step++) {
stepTimeStart= timems(); printf("\n");
linkStep= stepLink[step]; maxWeight= stepMaxWeights[step]; amoves0= stepMoves[step][0][0]; // set GLOBALs
amoves1= (stepMoveSetsLen[step][0]>1 ? stepMoves[step][0][1] : amoves0); // final step moves
if (!prevStepEmpty && !inSeq) printCube(&cube); // show cube to solve
if (stepSeq[step]==1) { bestSolLen= bestSolWeight= -1; inSeq= 1; seqTimeStart= stepTimeStart; } // start seq
else if (stepSeq[step]>1) { // join previous stepSeq[step] steps
nStepSol[step]= stepSeq[step];
char joinStr[256], *s= joinStr;
for(int ps=step-stepSeq[step], n=0; ps<step; ps++,n++) {
// Copy each previous step's last solution into this step's solution list.
memcpy(stepSol[step][n], stepSol[ps][nStepSol[ps]-1], stepSol[ps][nStepSol[ps]-1][0]+1);
s+= n ? sprintf(s, ",%d", ps) : sprintf(s, "%d", ps);
}
printlog("STEP %d: join solutions from steps %s\n", step, joinStr);
continue;
}
// The cube is not solved or max weight not achieved or have solutions in linked step
if ((!isSolved(&cube) || (maxWeight && cubeWeight(&cube)<maxWeight)) && (linkStep==-1 || nStepSol[linkStep])) {
if (maxWeight && !nStepPrunes[step]) doWeightStep(&cube); // run weight step
else doPruneStep(&cube); // run prune table search step
prevStepEmpty= 0;
}else{ // the cube is solved for the step or nothing to check
if (linkStep>=0 && !nStepSol[linkStep])
printlog("STEP %d=>%d nothing to check from linked step\n", step, linkStep);
else{
printlog("STEP %d is solved\n", step);
if (inSeq) { nStepSol[step]= 1; stepSol[step][0][0]= 0; } // add zero solution for next steps
}
prevStepEmpty= 1;
}
// Analyze & process step results
int nMoves0= nMoves; moveLen[nMoveLen]= 0; // zero moves profile by default
if (!inSeq) { // non sequence step
if (nStepSol[step]>0) { // add moves from last solution to moveList
int lastSol= nStepSol[step]-1;
for(int i=1; i<=stepSol[step][lastSol][0]; i++) moveList[nMoves++]= stepSol[step][lastSol][i];
moveLen[nMoveLen]= stepSol[step][lastSol][0]; // profile moves
}
moveTime[nMoveLen++]= timems()-stepTimeStart; // save step time profile
}else if (inSeq && stepSeq[step]==-1) { // last step in sequence
inSeq= 0;
if (bestSolLen>=0) { // add moves from bestStep/bestStepSol to moveList
for(int i=1; i<=stepSol[bestStep][bestStepSol][0]; i++)
moveList[nMoves++]= stepSol[bestStep][bestStepSol][i];
moveLen[nMoveLen]= stepSol[bestStep][bestStepSol][0]; // profile moves
}
moveTime[nMoveLen++]= timems()-seqTimeStart; // save seq time profile
}
// Apply the moves this step appended, so `cube` tracks moveList.
for(int i=nMoves0; i<nMoves; i++) moveCubeSelf(&cube, moveList[i]);
}
// Try to optimize the cube by removing and replacing some moves defined in .cr file
// Peephole pass over moveList: cancel inverse pairs, merge mergeable pairs,
// and commute across "special" moves; repeats until a fixed point.
int nMoves0= nMoves, moveListOpt[N_MAX_MOVES], isOpt= 0;
printf("\nOptimizing...\n");
while(1) {
int i=0, j, nMovesOpt= 0;
while( i<nMoves-1 ) {
int m1= moveList[i], m2= moveList[i+1]; char *mn1= moveNames[m1], *mn2= moveNames[m2];
if ( moveSpecial[m1][0]==m2) { printf("\t%s-%s=skip at %d\n", mn1, mn2, i+1); i+=2; isOpt=1; }
else if (moveSpecial[m1][1]==m2 && moveSpecial[m1][2]>=0) {
int m= moveSpecial[m1][2]; moveListOpt[nMovesOpt++]= m; isOpt=1;
printf("\t%s+%s=%s at %d\n", mn1, mn2, moveNames[m], i+1); i+=2;
}else if (moveSpecial[m1][2]==m2 && moveSpecial[m1][3]>=0) {
int m= moveSpecial[m1][3]; moveListOpt[nMovesOpt++]= m; isOpt=1;
printf("\t%s+%s=%s at %d\n", mn1, mn2, moveNames[m], i+1); i+=2;
}else{
for(j=4; j<N_MOVES_SPECIAL; j++) if (moveSpecial[m1][j]==m2) break;
if (j<N_MOVES_SPECIAL && i<nMoves-2) {
int m3= moveList[i+2]; char *mn3= moveNames[m3];
if (moveSpecial[m1][0]==m3) {
printf("\t%s-%s-%s=%s at %d\n", mn1, mn2, mn3, mn2, i+1);
moveListOpt[nMovesOpt++]=m2; i+=3; isOpt=1;
}else if (moveSpecial[m1][1]==m3 && moveSpecial[m1][2]>=0) {
int m= moveSpecial[m1][2]; moveListOpt[nMovesOpt++]= m; moveListOpt[nMovesOpt++]= m2; isOpt=1;
printf("\t%s+%s+%s=%s+%s at %d\n", mn1, mn2, mn3, moveNames[m], mn2, i+1); i+=3;
}else if (moveSpecial[m1][2]==m3 && moveSpecial[m1][3]>=0) {
int m= moveSpecial[m1][3]; moveListOpt[nMovesOpt++]= m; moveListOpt[nMovesOpt++]= m2; isOpt=1;
printf("\treplace %s+%s+%s=%s+%s at %d\n", mn1, mn2, mn3, moveNames[m], mn2, i+1); i+=3;
}else{ moveListOpt[nMovesOpt++]= m1; i++; }
}else{ moveListOpt[nMovesOpt++]= m1; i++; }
}
}
if (i<nMoves) moveListOpt[nMovesOpt++]= moveList[i];
if (nMoves==nMovesOpt) break;
for(int i=0; i<nMovesOpt; i++) moveList[i]=moveListOpt[i];
nMoves= nMovesOpt;
}
if (!isOpt) printf("\tnothing to optimize\n"); else printf("\n");
// Show the solution and write out .solution file
FILE *f= fopen(".solution","w"); printf("\nSolution:\n");
for(int i=0; i<nMoves; i++) {
moveCubeSelf(&cube0, moveList[i]);
printf("*%d:%d\t%s\t", i+1, moveList[i], moveNames[moveList[i]]);
printCubeLine( &cube0 );
fprintf(f, "%s%d", (i ? " " : ""), moveList[i]);
}
fclose(f);
// Last check - is the cube solved for the last step
step=N_STEPS-1; if (isSolved(&cube0)) {
printCube(&cube0);
printf("moveLen=%d,%d", nMoves, nMoves0-nMoves); for(int i=0; i<nMoveLen; i++) printf(",%d", moveLen[i]);
printf("\nmoveTime=0,0"); for(int i=0; i<nMoveLen; i++) printf(",%ld", moveTime[i]);
printf("\n");
}else printf("FUCK!!! moveLen=0\n"); // something goes wrong
return 0;
}
|
global.c | #include <stdio.h>
#include <unistd.h> /* sleep, usleep */
#include "../gptl.h"
#ifdef HAVE_MPI
#include <mpi.h>
#endif
#ifdef THREADED_OMP
#include <omp.h>
#endif
/*
 * GPTL test driver: times several sleep workloads (and optionally a PAPI
 * FP-op kernel) under any combination of MPI (HAVE_MPI), OpenMP
 * (THREADED_OMP), and PAPI (HAVE_PAPI), then prints per-rank and summary
 * timing reports. Returns nonzero on GPTL/MPI failure.
 */
int main (int argc, char **argv)
{
int iam = 0;
int nranks = 1; /* number of MPI tasks (default 1) */
int nthreads = 1; /* number of threads (default 1) */
int iter;
int tnum = 0;
#ifdef HAVE_PAPI
int code;
#endif
int ret;
unsigned int nsec; /* number of seconds to sleep */
#ifdef HAVE_PAPI
int sub (int, int);
#endif
ret = GPTLsetoption (GPTLabort_on_error, 1);
#ifdef HAVE_PAPI
ret = GPTLevent_name_to_code ("PAPI_FP_OPS", &code);
if (ret == 0) {
printf ("Enabling option PAPI_FP_OPS\n");
ret = GPTLsetoption (code, 1);
} else {
printf ("Unable to get option for PAPI_FP_OPS\n");
}
#endif
#ifdef HAVE_MPI
if (MPI_Init (&argc, &argv) != MPI_SUCCESS) {
printf ("Failure from MPI_Init\n");
return 1;
}
ret = MPI_Comm_rank (MPI_COMM_WORLD, &iam);
ret = MPI_Comm_size (MPI_COMM_WORLD, &nranks);
#endif
ret = GPTLinitialize ();
ret = GPTLstart ("total");
#ifdef THREADED_OMP
nthreads = omp_get_max_threads ();
#pragma omp parallel for private (ret, tnum, nsec)
#endif
for (iter = 0; iter < nthreads; ++iter) {
#ifdef THREADED_OMP
tnum = omp_get_thread_num ();
#endif
/* Test 1: threaded sleep */
ret = GPTLstart ("nranks-iam+mythread");
/* NOTE(review): the cast binds to nranks only, i.e. (unsigned)nranks - iam
   + tnum; harmless here since the result is assigned to an unsigned anyway,
   but (unsigned int)(nranks-iam+tnum) was probably intended. */
nsec = (unsigned int) nranks-iam+tnum;
ret = sleep (nsec);
ret = GPTLstop ("nranks-iam+mythread");
}
/* Test 2: 5-task sleep(iam) ms */
if (iam > 0 && iam < 6) {
ret = GPTLstart ("1-5_iam");
nsec = iam;
ret = sleep (nsec);
ret = GPTLstop ("1-5_iam");
}
#ifdef HAVE_PAPI
/* Test 3: PAPI */
/* NOTE(review): tnum was private in the parallel loop above, so here it is
   still 0 regardless of threading — confirm that is intended. */
ret = GPTLstart ("1e3*iam*mythread_FP_OPS");
ret = sub (iam, tnum);
ret = GPTLstop ("1e3*iam*mythread_FP_OPS");
#endif
ret = GPTLstop ("total");
ret = GPTLpr (iam);
if (iam == 0)
printf ("global: testing GPTLpr_summary...\n");
#ifdef HAVE_MPI
if (GPTLpr_summary (MPI_COMM_WORLD) != 0)
return 1;
ret = MPI_Finalize ();
#else
if (GPTLpr_summary () != 0)
return 1;
#endif
if (GPTLfinalize () != 0)
return 1;
return 0;
}
#ifdef HAVE_PAPI
/* Burn iam*tnum floating-point multiplies so PAPI has FP_OPS to count.
   The multiply is done in double precision (0.999 is a double literal) and
   truncated back to float each step, exactly as before. Always returns 0. */
int sub (int iam, int tnum)
{
  const int nops = iam * tnum; /* total multiplies to perform */
  float sum = 1.7;             /* double 1.7 narrowed to float */
  int done = 0;
  while (done < nops) {
    sum *= 0.999;              /* double multiply, result stored as float */
    ++done;
  }
  printf ("sum=%f\n", sum);
  return 0;
}
#endif
|
cpd_omp.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <ParTI.h>
#include <assert.h>
#include <math.h>
#ifdef PARTI_USE_MAGMA
#include "magma_v2.h"
#include "magma_lapack.h"
#else
#include "clapack.h"
#endif
#include "hicoo.h"
#ifdef PARTI_USE_OPENMP
/*
 * One full CPD-ALS solve on a HiCOO sparse tensor (OpenMP version).
 * For up to `niters` iterations, each mode m is updated by: MTTKRP (scheduled
 * variant chosen per-mode by par_iters[m]), a normal-equations solve against
 * the Hadamard product of the other modes' Gram matrices, column
 * normalization into `lambda`, and a Gram-matrix (ata) refresh via ssyrk.
 * Stops early when |fit - oldfit| < tol after the first iteration.
 *
 * hitsr      : input HiCOO tensor.
 * rank       : CP rank (number of factor columns).
 * tk, tb     : OpenMP thread counts for kernels / blocks.
 * par_iters  : per-mode flag, 1 => use the privatized-reduce MTTKRP.
 * mats       : nmodes factor matrices + mats[nmodes] as MTTKRP scratch.
 * copy_mats  : per-thread factor copies for the reduce MTTKRP.
 * lambda     : output column norms.
 * Returns the final fit value.
 */
double OmpCpdAlsStepHiCOO(
sptSparseTensorHiCOO const * const hitsr,
sptIndex const rank,
sptIndex const niters,
double const tol,
const int tk,
const int tb,
const int * par_iters,
sptRankMatrix ** mats,
sptRankMatrix *** copy_mats,
sptValue * const lambda,
int balanced)
{
sptIndex const nmodes = hitsr->nmodes;
sptIndex const stride = mats[0]->stride;
double fit = 0;
omp_set_num_threads(tk);
#ifdef PARTI_USE_MAGMA
magma_set_omp_numthreads(tk);
magma_set_lapack_numthreads(tk);
// printf("magma nthreads: %d\n", magma_get_parallel_numthreads());
// printf("magma nthreads: %d\n", magma_get_omp_numthreads());
// printf("magma lapack nthreads: %d\n", magma_get_lapack_numthreads());
#endif
// sptAssert(stride == rank); // for correct column-major magma functions
for(sptIndex m=0; m < nmodes; ++m) {
sptAssert(hitsr->ndims[m] == mats[m]->nrows);
sptAssert(mats[m]->ncols == rank);
}
sptValue alpha = 1.0, beta = 0.0;
char notrans = 'N';
// char trans = 'T';
char uplo = 'L';
int blas_rank = (int) rank;
int blas_stride = (int) stride;
sptRankMatrix * tmp_mat = mats[nmodes];
// ata[m] holds mats[m]^T * mats[m]; ata[nmodes] is scratch for the solver.
sptRankMatrix ** ata = (sptRankMatrix **)malloc((nmodes+1) * sizeof(*ata));
for(sptIndex m=0; m < nmodes+1; ++m) {
ata[m] = (sptRankMatrix *)malloc(sizeof(sptRankMatrix));
sptAssert(sptNewRankMatrix(ata[m], rank, rank) == 0);
sptAssert(mats[m]->stride == ata[m]->stride);
}
/* Compute all "ata"s */
for(sptIndex m=0; m < nmodes; ++m) {
/* ata[m] = mats[m]^T * mats[m]), actually do A * A' due to row-major mats, and output an upper triangular matrix. */
int blas_nrows = (int)(mats[m]->nrows);
ssyrk_(&uplo, &notrans, &blas_rank, &blas_nrows, &alpha,
mats[m]->values, &blas_stride, &beta, ata[m]->values, &blas_stride);
}
// printf("Initial mats:\n");
// for(size_t m=0; m < nmodes+1; ++m)
// sptDumpRankMatrix(mats[m], stdout);
// printf("Initial ata:\n");
// for(sptIndex m=0; m < nmodes+1; ++m)
// sptDumpRankMatrix(ata[m], stdout);
double oldfit = 0;
sptIndex * mats_order = (sptIndex*)malloc(nmodes * sizeof(*mats_order));
sptTimer tmp_timer;
sptNewTimer(&tmp_timer, 0);
// double mttkrp_time, solver_time, norm_time, ata_time, fit_time;
// double sum_time = 0.0;
for(sptIndex it=0; it < niters; ++it) {
// printf(" its = %3lu\n", it+1);
// sum_time = 0.0;
sptTimer timer;
sptNewTimer(&timer, 0);
sptStartTimer(timer);
for(sptIndex m=0; m < nmodes; ++m) {
// printf("\nmode %u \n", m);
tmp_mat->nrows = mats[m]->nrows;
/* Factor Matrices order */
// mats_order rotates so mode m is first: m, m+1, ..., wrapping around.
mats_order[0] = m;
for(sptIndex i=1; i<nmodes; ++i)
mats_order[i] = (m+i) % nmodes;
// sptAssert (sptOmpMTTKRPHiCOO_MatrixTiling(hitsr, mats, mats_order, m) == 0);
sptStartTimer(tmp_timer);
if(par_iters[m] == 1) {
sptAssert (sptOmpMTTKRPHiCOO_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats[m], mats_order, m, tk, tb, balanced) == 0);
} else {
sptAssert (sptOmpMTTKRPHiCOO_MatrixTiling_Scheduled(hitsr, mats, mats_order, m, tk, tb, balanced) == 0);
}
sptStopTimer(tmp_timer);
// mttkrp_time = sptPrintElapsedTime(tmp_timer, "MTTKRP");
// printf("sptMTTKRPHiCOO_MatrixTiling mats[nmodes]:\n");
// sptDumpRankMatrix(mats[nmodes], stdout);
sptStartTimer(tmp_timer);
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for num_threads(tk)
#endif
// Copy the MTTKRP result from the scratch matrix into mats[m].
for(sptIndex i=0; i<mats[m]->nrows * stride; ++i)
mats[m]->values[i] = tmp_mat->values[i];
/* Solve ? * ata[nmodes] = mats[nmodes] (tmp_mat) */
/* result is row-major, solve AT XT = BT */
sptAssert ( sptRankMatrixSolveNormals(m, nmodes, ata, mats[m]) == 0 );
sptStopTimer(tmp_timer);
// solver_time = sptPrintElapsedTime(tmp_timer, "memcpy and sptRankMatrixSolveNormals");
// printf("Inverse mats[m]:\n");
// sptDumpRankMatrix(mats[m], stdout);
/* Normalized mats[m], store the norms in lambda. Use different norms to avoid precision explosion. */
sptStartTimer(tmp_timer);
if (it == 0 ) {
sptRankMatrix2Norm(mats[m], lambda);
} else {
sptRankMatrixMaxNorm(mats[m], lambda);
}
sptStopTimer(tmp_timer);
// norm_time = sptPrintElapsedTime(tmp_timer, "matrix norm");
// printf("Normalize mats[m]:\n");
// sptDumpRankMatrix(mats[m], stdout);
// printf("lambda:\n");
// for(size_t i=0; i<rank; ++i)
// printf("%lf ", lambda[i]);
// printf("\n\n");
/* ata[m] = mats[m]^T * mats[m]) */
sptStartTimer(tmp_timer);
int blas_nrows = (int)(mats[m]->nrows);
ssyrk_(&uplo, &notrans, &blas_rank, &blas_nrows, &alpha,
mats[m]->values, &blas_stride, &beta, ata[m]->values, &blas_stride);
sptStopTimer(tmp_timer);
// ata_time = sptPrintElapsedTime(tmp_timer, "update ata");
// printf("Update ata[m]:\n");
// sptDumpRankMatrix(ata[m], stdout);
// sum_time += mttkrp_time + norm_time + ata_time;
} // Loop nmodes
// PrintDenseValueVector(lambda, rank, "lambda", "debug.txt");
sptStartTimer(tmp_timer);
fit = KruskalTensorFitHiCOO(hitsr, lambda, mats, ata);
sptStopTimer(tmp_timer);
// fit_time = sptPrintElapsedTime(tmp_timer, "KruskalTensorFitHiCOO");
sptStopTimer(timer);
double its_time = sptElapsedTime(timer);
sptFreeTimer(timer);
printf(" its = %3u ( %.3lf s ) fit = %0.5f delta = %+0.4e\n",
it+1, its_time, fit, fit - oldfit);
if(it > 0 && fabs(fit - oldfit) < tol) {
break;
}
oldfit = fit;
} // Loop niters
GetRankFinalLambda(rank, nmodes, mats, lambda);
for(sptIndex m=0; m < nmodes+1; ++m) {
sptFreeRankMatrix(ata[m]);
}
free(ata);
free(mats_order);
return fit;
}
/*
 * Driver for OpenMP CPD-ALS on a HiCOO sparse tensor: allocates and
 * randomizes the factor matrices (plus one scratch matrix sized by the
 * largest mode), decides per-mode which MTTKRP scheduling variant to use
 * (par_iters), runs OmpCpdAlsStepHiCOO, and hands the factor matrices to
 * `ktensor` (ktensor->factors takes ownership of `mats`).
 *
 * Fix: `par_iters` was allocated with malloc but never freed — released
 * before returning now. Returns 0 on success (assertions abort on failure).
 */
int sptOmpCpdAlsHiCOO(
  sptSparseTensorHiCOO const * const hitsr,
  sptIndex const rank,
  sptIndex const niters,
  double const tol,
  const int tk,
  const int tb,
  int balanced,
  sptRankKruskalTensor * ktensor)
{
  sptIndex nmodes = hitsr->nmodes;
#ifdef PARTI_USE_MAGMA
  magma_init();
#endif

  /* Initialize factor matrices; mats[nmodes] is the MTTKRP scratch matrix,
     sized by the largest mode dimension. */
  sptIndex max_dim = 0;
  for(sptIndex m=0; m < nmodes; ++m) {
    max_dim = (hitsr->ndims[m] > max_dim) ? hitsr->ndims[m] : max_dim;
  }
  sptRankMatrix ** mats = (sptRankMatrix **)malloc((nmodes+1) * sizeof(*mats));
  for(sptIndex m=0; m < nmodes+1; ++m) {
    mats[m] = (sptRankMatrix *)malloc(sizeof(sptRankMatrix));
  }
  for(sptIndex m=0; m < nmodes; ++m) {
    sptAssert(sptNewRankMatrix(mats[m], hitsr->ndims[m], rank) == 0);
    // assert(sptConstantRankMatrix(mats[m], 1) == 0);
    sptAssert(sptRandomizeRankMatrix(mats[m], hitsr->ndims[m], rank) == 0);
  }
  sptAssert(sptNewRankMatrix(mats[nmodes], max_dim, rank) == 0);
  sptAssert(sptConstantRankMatrix(mats[nmodes], 0) == 0);

  /* determine niters or num_kernel_dim to be parallelized */
  int * par_iters = (int *)malloc(nmodes * sizeof(*par_iters));
  sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
  for(sptIndex m=0; m < nmodes; ++m) {
    par_iters[m] = 0;
    sptIndex num_kernel_dim = (hitsr->ndims[m] + sk - 1) / sk;
    // printf("num_kernel_dim: %u, hitsr->nkiters[m] / num_kernel_dim: %u\n", num_kernel_dim, hitsr->nkiters[m]/num_kernel_dim);
    /* Few kernel rows but many iterations per row => privatized-reduce MTTKRP. */
    if(num_kernel_dim <= PAR_MIN_DEGREE * NUM_CORES && hitsr->nkiters[m] / num_kernel_dim >= PAR_DEGREE_REDUCE) {
      par_iters[m] = 1;
    }
  }
  printf("par_iters:\n");
  for(sptIndex m=0; m < nmodes; ++m) {
    printf("%d, ", par_iters[m]);
  }
  printf("\n");

  /* Per-thread factor copies, only for modes using the reduce variant. */
  sptRankMatrix *** copy_mats = (sptRankMatrix ***)malloc(nmodes * sizeof(*copy_mats));
  for(sptIndex m=0; m < nmodes; ++m) {
    if (par_iters[m] == 1) {
      copy_mats[m] = (sptRankMatrix **)malloc(tk * sizeof(sptRankMatrix*));
      for(int t=0; t<tk; ++t) {
        copy_mats[m][t] = (sptRankMatrix *)malloc(sizeof(sptRankMatrix));
        sptAssert(sptNewRankMatrix(copy_mats[m][t], hitsr->ndims[m], rank) == 0);
        sptAssert(sptConstantRankMatrix(copy_mats[m][t], 0) == 0);
      }
    }
  }

  sptTimer timer;
  sptNewTimer(&timer, 0);
  sptStartTimer(timer);

  ktensor->fit = OmpCpdAlsStepHiCOO(hitsr, rank, niters, tol, tk, tb, par_iters, mats, copy_mats, ktensor->lambda, balanced);

  sptStopTimer(timer);
  sptPrintElapsedTime(timer, "CPU HiCOO SpTns CPD-ALS");
  sptFreeTimer(timer);

  ktensor->factors = mats;  /* ktensor takes ownership of the factor array */

#ifdef PARTI_USE_MAGMA
  magma_finalize();
#endif
  /* Release the scratch matrix's contents; the struct pointer itself stays in
     ktensor->factors[nmodes] (NOTE(review): matches original behavior — the
     struct allocation is intentionally not freed here). */
  sptFreeRankMatrix(mats[nmodes]);
  for(sptIndex m=0; m < nmodes; ++m) {
    if(par_iters[m] == 1) {
      for(int t=0; t<tk; ++t) {
        sptFreeRankMatrix(copy_mats[m][t]);
        free(copy_mats[m][t]);
      }
      free(copy_mats[m]);
    }
  }
  free(copy_mats);
  free(par_iters);  /* fix: was leaked */
  return 0;
}
#endif
|
Par-17-ParForLoopNoWaitBarrier.c |
// Example of two consecutive `omp for nowait` loops over the same array made
// race-free by an explicit barrier: the first loop's `nowait` drops its
// implied barrier, so without the `#pragma omp barrier` a thread could start
// the second loop (reading a[i]) before another thread finished writing it.
// The second `nowait` is safe because the end of the parallel region carries
// an implied barrier and a[] is not touched again inside the region.
int main(int argc, char **argv) {
int a[4] = {1,2,3,4};
#pragma omp parallel
{
#pragma omp for nowait
for (int i = 0; i < 4; ++i) {
a[i] = 3*a[i];
}
// Explicit barrier replaces the implied one removed by `nowait` above.
#pragma omp barrier
#pragma omp for nowait
for (int i = 0; i < 4; ++i) {
a[i] += a[i];
}
}
return 0;
}
|
render2.c | #include "simpleRayTracer.h"
// Render an NI x NJ image by tracing p_Nsamples rays per pixel through a thin
// lens model, rotating the camera by (costheta, sintheta) about the vertical
// axis through (cx, cz). Pixels are distributed to OpenMP threads by manual
// round-robin striding. NOTE(review): thread 0 `continue`s past every pixel it
// owns, leaving those pixels untouched — presumably filled afterwards by an
// interpolation pass (see interpolateScene); confirm with the caller.
// The `randomNumbers` argument and `sampId` below are currently unused.
void renderKernel2(const int NI,
const int NJ,
scene_t scene,
const sensor_t sensor,
const dfloat costheta,
const dfloat sintheta,
const dfloat *randomNumbers,
unsigned char *img){
const colour_t bg = sensor.bg;
// unpack contents of scene
grid_t *grid = scene.grid;
material_t *materials = scene.materials;
shape_t *shapes = scene.shapes;
light_t *lights = scene.lights;
const int Nlights = scene.Nlights;
const int Nmaterials = scene.Nmaterials;
const int Nshapes = scene.Nshapes;
// (I,J) loop over pixels in image
#pragma omp parallel
for(int K=omp_get_thread_num();K<NI*NJ;K+=omp_get_num_threads()){
if (omp_get_thread_num() == 0) continue;
int I = K%NI;
int J = K/NI;
ray_t r;
dfloat coef = 1.0;
int level = 0;
// 2.5 location of sensor pixel
colour_t c;
dfloat x0 = sensor.eyeX.x;
dfloat y0 = sensor.eyeX.y;
dfloat z0 = sensor.eyeX.z;
// multiple rays emanating from sensor, passing through lens and focusing at the focal plane
// 1. compute intersection of ray passing through lens center to focal plane
// (sensorX + alpha*(lensC -sensorX)).sensorN = focalPlaneOffset
// alpha = (focalOffset-s.sensorN)/( (lensC-s).sensorN) [ . dot product ]
// (cx, cz) is the rotation center; cy is currently unused.
dfloat cx = BOXSIZE/2., cy = HEIGHT, cz = BOXSIZE/2;
vector_t sensorN = vectorCrossProduct(sensor.Idir, sensor.Jdir);
vector_t sensorX = sensorLocation(NI, NJ, I, J, sensor);
dfloat focalPlaneOffset = sensor.focalPlaneOffset;
vector_t centralRayDir = vectorSub(sensor.lensC, sensorX);
dfloat alpha = (focalPlaneOffset - vectorDot(sensorX, sensorN))/vectorDot(centralRayDir, sensorN);
// 2. target
vector_t targetX = vectorAdd(sensorX, vectorScale(alpha, centralRayDir));
x0 = sensorX.x;
y0 = sensorX.y;
z0 = sensorX.z;
// 3. loop over vertical offsets on lens (thin lens)
c.red = 0; c.green = 0; c.blue = 0;
for(int samp=0;samp<p_Nsamples;++samp){
// aperture width
// NOTE(review): sampId is computed but never used, and offI/offJ are fixed
// at p_apertureRadius (no jitter from randomNumbers) — confirm intent.
int sampId = (I+J*NI + samp*25*25)%NRANDOM;
dfloat offI = p_apertureRadius;
dfloat offJ = p_apertureRadius;
// choose random starting point on lens (assumes lens and sensor arre parallel)
if(samp>0) { // primary ray
x0 = sensor.lensC.x + offI*sensor.Idir.x + offJ*sensor.Jdir.x;
y0 = sensor.lensC.y + offI*sensor.Idir.y + offJ*sensor.Jdir.y;
z0 = sensor.lensC.z + offI*sensor.Idir.z + offJ*sensor.Jdir.z;
}
// normalized direction from the lens sample toward the focal-plane target
dfloat dx0 = targetX.x - x0;
dfloat dy0 = targetX.y - y0;
dfloat dz0 = targetX.z - z0;
dfloat L0 = sqrt(dx0*dx0+dy0*dy0+dz0*dz0);
dx0 = dx0/L0;
dy0 = dy0/L0;
dz0 = dz0/L0;
// rotate ray origin and direction about the vertical axis through (cx, cz)
r.start.x = costheta*(x0-cx) - sintheta*(z0-cz) + cx;
r.start.y = y0;
r.start.z = sintheta*(x0-cx) + costheta*(z0-cz) + cz;
r.dir.x = costheta*dx0 - sintheta*dz0;
r.dir.y = dy0;
r.dir.z = sintheta*dx0 + costheta*dz0;
// trace ray through scene (possibly with multipathing, reflection, refraction)
colour_t newc =
gridTrace(grid[0], Nshapes, shapes, Nlights, lights, Nmaterials, materials, r, level, coef, bg);
// add colors to final intensity for IJ pixel
dfloat sc = (samp==0) ? p_primaryWeight: 1.f;
c.red += sc*newc.red;
c.green += sc*newc.green;
c.blue += sc*newc.blue;
}
// primary weighted average
c.red /= (p_primaryWeight+p_Nsamples-1);
c.green /= (p_primaryWeight+p_Nsamples-1);
c.blue /= (p_primaryWeight+p_Nsamples-1);
// Clamp near-black rendered pixels up to 1.1/255 so pure black stays
// reserved for unrendered pixels.
if (c.red*255.0f < 1 && c.green*255.0f < 1 && c.blue*255.0f < 1) {
c.red = 1.1f/255;
c.green = 1.1f/255;
c.blue = 1.1f/255;
}
// store pixel rgb intensities (reverse vertical because of lensing)
img[(I + (NJ-1-J)*NI)*3 + 0] = (unsigned char)min( c.red*255.0f, 255.0f);
img[(I + (NJ-1-J)*NI)*3 + 1] = (unsigned char)min(c.green*255.0f, 255.0f);
img[(I + (NJ-1-J)*NI)*3 + 2] = (unsigned char)min( c.blue*255.0f, 255.0f);
}
}
/*
 * Fill every pure-black pixel (r==g==b==0 — the sentinel renderKernel2 leaves
 * for unrendered pixels) with the average of its non-black 4-neighbors.
 * Pixel (I,J) is stored at image row NJ-1-J (vertically flipped), so its
 * "down" neighbor lives at row NJ-J (pixel J-1) and its "up" neighbor at row
 * NJ-2-J (pixel J+1).
 *
 * Bug fix: the two row-neighbor bounds guards were swapped. Row NJ-J is only
 * valid when J > 0 (at J==0 it indexed row NJ, past the end of img), and row
 * NJ-2-J is only valid when J < NJ-1 (at J==NJ-1 it indexed row -1). The
 * guards now match the rows they protect, eliminating the out-of-bounds reads.
 */
void interpolateScene(const int NI,
                      const int NJ,
                      unsigned char *img) {
  for (int J = 0; J < NJ; ++J) {
    for (int I = 0; I < NI; ++I) {
      unsigned char this_red = img[(I + (NJ-1-J)*NI)*3 + 0];
      unsigned char this_green = img[(I + (NJ-1-J)*NI)*3 + 1];
      unsigned char this_blue = img[(I + (NJ-1-J)*NI)*3 + 2];
      /* only pure-black (unrendered) pixels get interpolated */
      if (this_red != 0 || this_green != 0 || this_blue != 0) continue;
      unsigned char up_red = 0;
      unsigned char up_green = 0;
      unsigned char up_blue = 0;
      unsigned char down_red = 0;
      unsigned char down_green = 0;
      unsigned char down_blue = 0;
      unsigned char left_red = 0;
      unsigned char left_green = 0;
      unsigned char left_blue = 0;
      unsigned char right_red = 0;
      unsigned char right_green = 0;
      unsigned char right_blue = 0;
      if (I > 0) {
        left_red = img[(I -1+ (NJ-1-J)*NI)*3 + 0];
        left_green = img[(I -1+ (NJ-1-J)*NI)*3 + 1];
        left_blue = img[(I -1+ (NJ-1-J)*NI)*3 + 2];
      }
      if (I < NI - 1) {
        right_red = img[(I + 1 + (NJ-1-J)*NI)*3 + 0];
        right_green = img[(I + 1 +(NJ-1-J)*NI)*3 + 1];
        right_blue = img[(I + 1 +(NJ-1-J)*NI)*3 + 2];
      }
      if (J > 0) {              /* fixed guard: row NJ-J exists only when J > 0 */
        down_red = img[(I + (NJ-J)*NI)*3 + 0];
        down_green = img[(I + (NJ-J)*NI)*3 + 1];
        down_blue = img[(I + (NJ-J)*NI)*3 + 2];
      }
      if (J < NJ - 1) {         /* fixed guard: row NJ-2-J exists only when J < NJ-1 */
        up_red = img[(I + (NJ-2-J)*NI)*3 + 0];
        up_green = img[(I + (NJ-2-J)*NI)*3 + 1];
        up_blue = img[(I + (NJ-2-J)*NI)*3 + 2];
      }
      /* count neighbors that actually carry color (all-zero = missing) */
      int nnz = 4;
      if (left_red == 0 && left_green == 0 && left_blue == 0) nnz--;
      if (right_red == 0 && right_green == 0 && right_blue == 0) nnz--;
      if (up_red == 0 && up_green == 0 && up_blue == 0) nnz--;
      if (down_red == 0 && down_green == 0 && down_blue == 0) nnz--;
      if (nnz == 0) continue;   /* fully isolated pixel: leave it black */
      img[(I + (NJ-1-J)*NI)*3 + 0] = (left_red + right_red + up_red + down_red)/nnz;
      img[(I + (NJ-1-J)*NI)*3 + 1] = (left_green + right_green + up_green + down_green)/nnz;
      img[(I + (NJ-1-J)*NI)*3 + 2] = (left_blue + right_blue + up_blue + down_blue)/nnz;
    }
  }
}
|
info.c | // RUN: %libomptarget-compile-nvptx64-nvidia-cuda && env LIBOMPTARGET_INFO=1 %libomptarget-run-nvptx64-nvidia-cuda 2>&1 | %fcheck-nvptx64-nvidia-cuda -allow-empty -check-prefix=INFO
#include <stdio.h>
#include <omp.h>
// Offloads one trivial kernel so the runtime emits the device banner and the
// kernel-launch message matched by the FileCheck lines below.
int main() {
  int ptr = 1;
  // INFO: CUDA device {{[0-9]+}} info: Device supports up to {{[0-9]+}} CUDA blocks and {{[0-9]+}} threads with a warp size of {{[0-9]+}}
  // INFO: CUDA device {{[0-9]+}} info: Launching kernel {{.*}} with {{[0-9]+}} blocks and {{[0-9]+}} threads in Generic mode
  #pragma omp target map(tofrom:ptr)
  {ptr = 1;}
  return 0;
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "coders/coders-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
  Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
  Enumerated declarations.
*/
/* Per-channel compression methods stored in PSD/PSB channel records. */
typedef enum
{
  Raw = 0,
  RLE = 1,
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3
} PSDCompressionType;
/* Color modes from the PSD file header (values 5 and 6 are unused here). */
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
  Typedef declarations.
*/
/* One channel record of a layer. */
typedef struct _ChannelInfo
{
  MagickBooleanType
    supported;   /* MagickFalse => channel data is skipped when reading */
  PixelChannel
    channel;     /* pixel channel this record maps to */
  size_t
    size;        /* stored data length in bytes, consumed on read */
} ChannelInfo;
/* Layer-mask data attached to a layer record. */
typedef struct _MaskInfo
{
  Image
    *image;      /* grayscale mask image, NULL when absent */
  RectangleInfo
    page;        /* mask position and extent */
  unsigned char
    background,  /* background fill value of the mask */
    flags;       /* PSD mask flags; values > 2 are treated as unsupported */
} MaskInfo;
/* Fully parsed layer record of a PSD file. */
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];  /* one entry per stored channel */
  char
    blendkey[4];                   /* 4-byte blend-mode key, file order */
  Image
    *image;                        /* decoded layer raster */
  MaskInfo
    mask;                          /* optional layer mask */
  Quantum
    opacity;                       /* layer opacity */
  RectangleInfo
    page;                          /* layer position and extent */
  size_t
    offset_x,
    offset_y;
  unsigned char
    clipping,
    flags,
    name[257],                     /* Pascal-style layer name, NUL padded */
    visible;
  unsigned short
    channels;                      /* number of valid channel_info entries */
  StringInfo
    *info;                         /* additional-information blob */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD file begins with the 4-byte signature "8BPS".
  */
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((const char *) magick,"8BPS",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CompositeOperatorToPSDBlendMode() maps the image's composite operator onto
  the 4-byte PSD blend-mode key; on LSBEndian images the byte-reversed
  spelling of the key is returned instead.  Unknown operators fall back to
  "norm" (normal).
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return(image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return(image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return(image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return(image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return(image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return(image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return(image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return(image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return(image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return(image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return(image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return(image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return(image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return(image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return(image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return(image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      return(image->endian == LSBEndian ? "mron" : "norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/*
  CorrectPSDAlphaBlend() reverts Photoshop's blending of semi-transparent
  pixels with white (see the note above).  Only sRGB images with a blended
  alpha channel are touched; the 'psd:alpha-unblend' image option, when set
  to false, disables the pass.  Returns MagickFalse if any pixel row could
  not be fetched or synced.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;
  MagickBooleanType
    status;
  ssize_t
    y;
  if ((image->alpha_trait != BlendPixelTrait) ||
      (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;
      ssize_t
        i;
      /* gamma is the pixel's alpha as a 0..1 fraction. */
      gamma=QuantumScale*GetPixelAlpha(image, q);
      if (gamma != 0.0 && gamma != 1.0)
        {
          /* Un-blend every channel except alpha itself. */
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /*
    Translate a PSD channel-compression tag into the equivalent
    ImageMagick CompressionType; unrecognized tags map to NoCompression.
  */
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  ApplyPSDLayerOpacity() scales every pixel's alpha by the layer opacity
  (revert == MagickFalse), or divides it back out (revert != MagickFalse)
  to undo a previous application.  A fully opaque layer is left untouched.
  Returns MagickFalse if any pixel row could not be fetched or synced.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  ssize_t
    y;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  /* Scaling needs an alpha channel; synthesize an opaque one if absent. */
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(QuantumScale*
          GetPixelAlpha(image,q)*opacity),q);
      else if (opacity > 0)
        SetPixelAlpha(image,ClampToQuantum((double) QuantumRange*
          GetPixelAlpha(image,q)/(MagickRealType) opacity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/*
  ApplyPSDOpacityMask() multiplies the image's alpha channel by the
  intensity of the layer mask (revert == MagickFalse), or divides it back
  out (revert != MagickFalse).  The mask is first composited over a clone
  of the image filled with the mask background so that areas the mask does
  not cover use the background value.  Images without an alpha channel are
  left untouched.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;
  MagickBooleanType
    status;
  PixelInfo
    color;
  ssize_t
    y;
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  /* Position the mask relative to the layer before compositing it. */
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    Quantum
      *p;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;
      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
/*
  PreservePSDOpacityMask() stashes the layer's opacity-mask image in the
  image registry under a random key and records that key in the layer's
  "psd:opacity-mask" artifact so the mask can be re-attached when the PSD
  is written again.  The mask background byte is appended to the key.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;
  RandomInfo
    *random_info;
  StringInfo
    *key_info;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    Bug fix: key[8] and key[9] are written below, so the key buffer must be
    at least 10 bytes; the previous GetRandomKey(random_info,2+1) allocated
    only 3 bytes and those stores wrote past the end of the buffer.
  */
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  DecodePSDPixels() expands a PackBits-style RLE stream into pixels, one
  output byte per sample (sub-byte depths are unpacked on the fly).  A
  control byte > 128 repeats the following byte 257-n times, 128 is a
  no-op, and n < 128 copies the next n+1 literal bytes.  At most
  number_compact_pixels input bytes are consumed and at most number_pixels
  output bytes are written; the return value is the number of output bytes
  actually produced.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
/* Bail out (returning the bytes written so far) when input is exhausted. */
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--
/* Bail out when the next write would overrun the output buffer. */
#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count
  int
    pixel;
  ssize_t
    i,
    j;
  size_t
    length;
  ssize_t
    packets;
  packets=(ssize_t) number_compact_pixels;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    length=(size_t) (*compact_pixels++);
    if (length == 128)
      continue;
    if (length > 128)
      {
        /* Run: repeat the next input byte 257-length times. */
        length=256-length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (j=0; j < (ssize_t) length; j++)
        {
          switch (depth)
          {
            case 1:
            {
              /* 1-bit: set bits become 0 (black), clear bits 255. */
              CheckNumberPixels(8);
              *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
              *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              /* 8-bit and wider: copy the byte verbatim. */
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /* Literal: copy the next length+1 input bytes. */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  /*
    Release every per-layer image, mask image and additional-information
    blob, then free the layer array itself; always returns NULL.
  */
  ssize_t
    i;

  for (i=0; i < number_layers; i++)
  {
    LayerInfo
      *layer;

    layer=layer_info+i;
    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(const Image *image)
{
  /*
    Bytes per stored sample: palette images with more than 256 entries use
    two-byte indexes; otherwise the size follows the image depth.
  */
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  return(image->depth > 8 ? 2 : 1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /*
    Size fields are 32-bit in version 1 (PSD) files and 64-bit otherwise.
  */
  return(psd_info->version == 1 ? (MagickSizeType) ReadBlobLong(image) :
    (MagickSizeType) ReadBlobLongLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  /*
    Bytes per stored row; 1-bit images pack eight pixels into each byte.
  */
  size_t
    samples;

  samples=(image->depth == 1) ? (image->columns+7)/8 : image->columns;
  return(samples*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
  /*
    Human-readable name of a PSD color mode, used for verbose logging.
  */
  if (type == BitmapMode)
    return "Bitmap";
  if (type == GrayscaleMode)
    return "Grayscale";
  if (type == IndexedMode)
    return "Indexed";
  if (type == RGBMode)
    return "RGB";
  if (type == CMYKMode)
    return "CMYK";
  if (type == MultichannelMode)
    return "Multichannel";
  if (type == DuotoneMode)
    return "Duotone";
  if (type == LabMode)
    return "L*A*B";
  return "unknown";
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    saved_mask;

  MagickBooleanType
    result;

  /*
    Negate every channel except alpha, restoring the previous channel mask
    before returning.
  */
  saved_mask=SetImageChannelMask(image,(ChannelType) (AllChannels &~
    AlphaChannel));
  result=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(result);
}
/*
  ParseImageResourceBlocks() walks the "8BIM" image-resource blocks,
  capturing the resolution record (0x03ed) into the image and clearing
  psd_info->has_merged_image when resource 0x0421 says no merged image is
  present.  The raw block data is returned as a StringInfo profile named
  "8bim" (NULL when length is too small to hold a block).
*/
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length)
{
  const unsigned char
    *p;
  ssize_t
    offset;
  StringInfo
    *profile;
  unsigned char
    name_length;
  unsigned int
    count;
  unsigned short
    id,
    short_sans;
  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    /* Pascal-style name, padded to an even total length. */
    p=PushCharPixel(p,&name_length);
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;
        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatImageProperty(image,"tiff:XResolution","%*g",
          GetMagickPrecision(),image->resolution.x);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatImageProperty(image,"tiff:YResolution","%*g",
          GetMagickPrecision(),image->resolution.y);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        /* NOTE(review): bytes of this block beyond the 16 consumed above
           are not skipped here -- confirm against the PSD spec. */
        break;
      }
      case 0x0421:
      {
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Block data is padded to an even byte count. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  /*
    Read a fixed-length string from the blob; when the image endian is not
    MSB, reverse the buffer in place.  Returns the byte count read.
  */
  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *head,
        *tail;

      head=p;
      tail=p+length-1;
      while (head < tail)
      {
        char
          byte;

        byte=*head;
        *head++=*tail;
        *tail--=byte;
      }
    }
  return(count);
}
/*
  SetPSDPixel() stores one decoded sample into the pixel q.  For PseudoClass
  images a gray sample is treated as a colormap index (constrained to the
  map) and the pixel is filled from the colormap entry; an alpha sample
  updates the entry's alpha first.  DirectClass images store the sample in
  the named channel directly.
*/
static inline void SetPSDPixel(Image *image,const PixelChannel channel,
  const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;
      if (channel == GrayPixelChannel)
        {
          Quantum
            index;
          index=pixel;
          /* One-byte samples arrive scaled to the quantum range; scale
             back to a raw 0..255 colormap index. */
          if (packet_size == 1)
            index=(Quantum) ScaleQuantumToChar(index);
          index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
            exception);
          SetPixelIndex(image,index,q);
        }
      color=image->colormap+(ssize_t) GetPixelIndex(image,q);
      if (channel == AlphaPixelChannel)
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
    }
  else
    SetPixelChannel(image,channel,pixel,q);
}
/*
  ReadPSDChannelPixels() stores one decoded row of samples (already
  decompressed into pixels) into the given channel of image row `row`.
  Samples are 1, 2 or 4 bytes wide depending on the packet size; 1-bit
  images receive one packed byte per eight pixels, expanded here with set
  bits mapping to 0 and clear bits to QuantumRange.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,const ssize_t row,
  const PixelChannel channel,const unsigned char *pixels,
  ExceptionInfo *exception)
{
  Quantum
    pixel;
  const unsigned char
    *p;
  Quantum
    *q;
  ssize_t
    x;
  size_t
    packet_size;
  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;
          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          /* 4-byte samples are big-endian floats in the 0..1 range. */
          MagickFloatType
            nibble;
          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum(((MagickRealType) QuantumRange)*nibble);
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channel,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        /* 1-bit data: expand up to eight pixels from the packed byte. */
        ssize_t
          bit,
          number_bits;
        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channel,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* Compensate for the outer loop's x++ unless the row is done. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
/*
  ReadPSDChannelRaw() reads an uncompressed channel row by row from the
  blob and stores it into the image.  Returns MagickFalse on a short read
  or when a row cannot be stored.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const PixelChannel channel,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  size_t
    row_size;
  ssize_t
    count,
    y;
  unsigned char
    *pixels;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* Assume failure until both the read and the store succeed. */
    status=MagickFalse;
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,y,channel,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    i;

  /*
    Read the per-row compressed byte counts that precede RLE channel data:
    version 1 files store 16-bit counts, version 2 files 32-bit counts.
    Returns NULL when the table cannot be allocated.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return(sizes);
  for (i=0; i < (ssize_t) size; i++)
    sizes[i]=(MagickOffsetType) (psd_info->version == 1 ?
      ReadBlobShort(image) : ReadBlobLong(image));
  return sizes;
}
/*
  ReadPSDChannelRLE() reads an RLE-compressed channel: each row's packed
  bytes (whose lengths were read earlier into sizes[]) are fetched, decoded
  with DecodePSDPixels() and stored into the image.  Returns MagickFalse on
  a short read, a row that decodes to the wrong length, or a row that
  cannot be stored.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,
  const PixelChannel channel,MagickOffsetType *sizes,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  size_t
    length,
    row_size;
  ssize_t
    count,
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the input buffer for the largest compressed row. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* For depth 1 a bogus depth is passed so DecodePSDPixels copies the
       packed bytes verbatim; ReadPSDChannelPixels expands the bits. */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,y,channel,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Unpredict8Bit() reverses horizontal delta prediction for 8-bit samples:
  within each row of image->columns bytes, every byte is stored as the
  difference from its left neighbour, so a running sum restores the values.
  count is the total byte count and row_size the bytes per row.
*/
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *p;
  size_t
    length,
    remaining;
  p=pixels;
  remaining=count;
  while (remaining > 0)
  {
    length=image->columns;
    while (--length)
    {
      *(p+1)+=*p;
      p++;
    }
    p++;
    remaining-=row_size;
  }
}
/*
  Unpredict16Bit() reverses horizontal delta prediction for big-endian
  16-bit samples: each sample is stored as the difference from its left
  neighbour, so a running 16-bit sum (with the carry from the low bytes
  propagated into the high byte) restores the values row by row.
*/
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *p;
  size_t
    length,
    remaining;
  p=pixels;
  remaining=count;
  while (remaining > 0)
  {
    length=image->columns;
    while (--length)
    {
      /* carry of the low-byte addition feeds the high byte */
      p[2]+=p[0]+((p[1]+p[3]) >> 8);
      p[3]+=p[1];
      p+=2;
    }
    p+=2;
    remaining-=row_size;
  }
}
/*
  Unpredict32Bit() reverses prediction for 32-bit samples.  Each row is
  first byte-delta decoded (running sum over row_size bytes), then the four
  per-row byte planes (columns bytes each) are interleaved into big-endian
  4-byte samples written to output_pixels.
*/
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
  unsigned char *output_pixels,const size_t row_size)
{
  unsigned char
    *p,
    *q;
  ssize_t
    y;
  size_t
    offset1,
    offset2,
    offset3,
    remaining;
  unsigned char
    *start;
  /* Offsets of the 2nd, 3rd and 4th byte planes within a row. */
  offset1=image->columns;
  offset2=2*offset1;
  offset3=3*offset1;
  p=pixels;
  q=output_pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    start=p;
    /* Undo the byte-wise delta over the whole row. */
    remaining=row_size;
    while (--remaining)
    {
      *(p+1)+=*p;
      p++;
    }
    /* Re-interleave the four byte planes into one sample per pixel. */
    p=start;
    remaining=image->columns;
    while (remaining--)
    {
      *(q++)=*p;
      *(q++)=*(p+offset1);
      *(q++)=*(p+offset2);
      *(q++)=*(p+offset3);
      p++;
    }
    p=start+row_size;
  }
}
/*
  ReadPSDChannelZip() reads compact_size bytes of ZIP-compressed channel
  data, inflates them into a full channel (rows*row_size bytes), undoes the
  delta prediction when the stream used ZipWithPrediction, and stores each
  row into the image.  Returns MagickFalse on decompression failure or when
  a row cannot be stored.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,
  const PixelChannel channel,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  unsigned char
    *p;
  size_t
    count,
    packet_size,
    row_size;
  ssize_t
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  z_stream
    stream;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /* Zero the target so a truncated ZIP stream cannot expose heap garbage. */
  (void) memset(pixels,0,count*sizeof(*pixels));
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;
      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      if (packet_size == 1)
        Unpredict8Bit(image,pixels,count,row_size);
      else if (packet_size == 2)
        Unpredict16Bit(image,pixels,count,row_size);
      else if (packet_size == 4)
        {
          unsigned char
            *output_pixels;
          output_pixels=(unsigned char *) AcquireQuantumMemory(count,
            sizeof(*output_pixels));
          /*
            Bug fix: test the allocation just made.  The previous code
            re-checked the already validated pixels pointer, so a failed
            output_pixels allocation went undetected and Unpredict32Bit
            dereferenced NULL.
          */
          if (output_pixels == (unsigned char *) NULL)
            {
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
              pixels=(unsigned char *) RelinquishMagickMemory(pixels);
              ThrowBinaryException(ResourceLimitError,
                "MemoryAllocationFailed",image->filename);
            }
          Unpredict32Bit(image,pixels,output_pixels,row_size);
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          pixels=output_pixels;
        }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,y,channel,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Read one channel of a layer's pixel data from the blob.  The channel's
  stored byte size (minus the 2-byte compression marker already consumed by
  the caller) bounds the read; the blob is always repositioned to the end of
  the channel's data before returning, even when decoding fails.  A
  user-supplied layer mask channel is decoded into a separate grayscale
  image that is attached to layer_info->mask.image on success.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel_index,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    end_offset,
    offset;

  MagickBooleanType
    status;

  PixelChannel
    channel;

  /* size includes the 2-byte compression marker the caller already read */
  end_offset=(MagickOffsetType) layer_info->channel_info[channel_index].size-2;
  if (layer_info->channel_info[channel_index].supported == MagickFalse)
    {
      /* Unsupported channel: skip its data and report success. */
      (void) SeekBlob(image,end_offset,SEEK_CUR);
      return(MagickTrue);
    }
  channel_image=image;
  channel=layer_info->channel_info[channel_index].channel;
  mask=(Image *) NULL;
  if (channel == ReadMaskPixelChannel)
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
          (IsStringTrue(option) == MagickFalse)) ||
          (layer_info->mask.page.width < 1) ||
          (layer_info->mask.page.height < 1))
        {
          (void) SeekBlob(image,end_offset,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          /* Decode the mask channel into its own grayscale image. */
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
          channel=GrayPixelChannel;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,channel,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        /* NOTE(review): if this allocation fails while mask != NULL, the
           mask image leaks — confirm whether upstream frees it here. */
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,channel,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,channel,compression,
        (const size_t) end_offset,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Always reposition to the end of this channel's data, pass or fail. */
  (void) SeekBlob(image,offset+end_offset,SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      /* Replace any previously attached mask image with the new one. */
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
/*
  Translate a zero-based PSD channel index into a MagickCore PixelChannel.
  The channel immediately after the colorspace's color channels maps to
  alpha; later channels become meta channels.  The special on-disk indexes
  -1 (alpha) and -2 (layer mask) are honored as-is.  Returns MagickFalse
  when the resulting index falls outside the representable channel range.
*/
static MagickBooleanType GetPixelChannelFromPsdIndex(const PSDInfo *psd_info,
  ssize_t index,PixelChannel *channel)
{
  ssize_t
    color_channels;

  *channel=RedPixelChannel;
  color_channels=0;  /* zero means: mode does not remap the index */
  switch (psd_info->mode)
  {
    case BitmapMode:
    case IndexedMode:
    case GrayscaleMode:
    {
      color_channels=1;
      break;
    }
    case LabMode:
    case MultichannelMode:
    case RGBMode:
    {
      color_channels=3;
      break;
    }
    case CMYKMode:
    {
      color_channels=4;
      break;
    }
    default:
      break;
  }
  if (color_channels != 0)
    {
      if (index == color_channels)
        index=(-1);
      else
        if (index > color_channels)
          index=StartMetaPixelChannel+index-color_channels-1;
    }
  if ((index < -2) || (index >= MaxPixelChannels))
    return(MagickFalse);
  if (index == -1)
    *channel=AlphaPixelChannel;
  else
    if (index == -2)
      *channel=ReadMaskPixelChannel;
    else
      *channel=(PixelChannel) index;
  return(MagickTrue);
}
/*
  Register any channels beyond the colorspace minimum — and beyond the alpha
  channel, when the image carries one — as meta channels on the image.
*/
static void SetPsdMetaChannels(Image *image,const PSDInfo *psd_info,
  const unsigned short channels,ExceptionInfo *exception)
{
  ssize_t
    extra;

  extra=(ssize_t) channels-psd_info->min_channels;
  if (image->alpha_trait == BlendPixelTrait)
    extra--;
  if (extra <= 0)
    return;
  (void) SetPixelMetaChannels(image,(size_t) extra,exception);
}
/*
  Read the pixel data of a single layer: each channel is preceded on disk by
  a 2-byte compression marker and decoded by ReadPSDChannel().  After the
  channels are read, layer opacity is applied, CMYK data is negated, and a
  decoded layer mask (if any) is composited onto the layer unless masking is
  disabled by the mask flags.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "    setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  SetPsdMetaChannels(layer_info->image,psd_info,layer_info->channels,exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "    reading data for channel %.20g",(double) j);
    /* Each channel's data starts with its own 2-byte compression marker. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  Verify that a layer supplies every color channel the PSD mode requires.
  A bit mask of still-missing channels is seeded from min_channels and
  cleared as supported channels are encountered.  An indexed-mode layer must
  carry its index data in the first channel.  The layer passes when nothing
  is missing, or when only an alpha channel was seen and the layer has at
  least one channel beyond the colorspace minimum.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    missing;

  ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  missing=RedChannel;
  if (psd_info->min_channels >= 3)
    missing|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    missing|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    PixelChannel
      channel;

    if (layer_info->channel_info[i].supported == MagickFalse)
      continue;
    channel=layer_info->channel_info[i].channel;
    if ((i == 0) && (psd_info->mode == IndexedMode) &&
        (channel != RedPixelChannel))
      return(MagickFalse);
    switch (channel)
    {
      case AlphaPixelChannel:
      {
        missing|=AlphaChannel;
        break;
      }
      case RedPixelChannel:
      {
        missing&=(~RedChannel);
        break;
      }
      case GreenPixelChannel:
      {
        missing&=(~GreenChannel);
        break;
      }
      case BluePixelChannel:
      {
        missing&=(~BlueChannel);
        break;
      }
      case BlackPixelChannel:
      {
        missing&=(~BlackChannel);
        break;
      }
      default:
        break;
    }
  }
  if (missing == 0)
    return(MagickTrue);
  if ((missing == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Compact the layer array by dropping entries that have no decoded image,
  then splice the surviving layer images into the image list directly after
  "image".  The layer_info array itself is always released.
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  ssize_t
    i,
    j;

  i=0;
  while (i < number_layers)
  {
    if (layer_info[i].image != (Image *) NULL)
      {
        i++;
        continue;
      }
    /* Shift the remaining entries down over the empty slot. */
    for (j=i; j < (number_layers-1); j++)
      layer_info[j]=layer_info[j+1];
    number_layers--;
  }
  if (number_layers != 0)
    {
      for (i=0; i < number_layers; i++)
      {
        if (i != 0)
          layer_info[i].image->previous=layer_info[i-1].image;
        if (i != (number_layers-1))
          layer_info[i].image->next=layer_info[i+1].image;
        layer_info[i].image->page=layer_info[i].page;
      }
      image->next=layer_info[0].image;
      layer_info[0].image->previous=image;
    }
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Return MagickTrue when the scene at "index" falls outside the scene range
  requested by the user.  Skipping only applies when a merged image exists
  as a fallback and the user restricted the scene range.
*/
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  if ((index >= image_info->scene) &&
      (index <= (image_info->scene+image_info->number_scenes-1)))
    return(MagickFalse);
  return(MagickTrue);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  MagickBooleanType
    has_alpha;

  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel, so infer it from the channel count for the
    given color mode instead.
  */
  has_alpha=MagickFalse;
  switch (psd_info->mode)
  {
    case GrayscaleMode:
    {
      has_alpha=psd_info->channels > 1 ? MagickTrue : MagickFalse;
      break;
    }
    case RGBMode:
    {
      has_alpha=psd_info->channels > 3 ? MagickTrue : MagickFalse;
      break;
    }
    case CMYKMode:
    {
      has_alpha=psd_info->channels > 4 ? MagickTrue : MagickFalse;
      break;
    }
    default:
      break;
  }
  if (has_alpha != MagickFalse)
    image->alpha_trait=BlendPixelTrait;
}
/*
  Walk the layer's "additional information" blocks (signature, 4-char key,
  32-bit big-endian size, payload) looking for the "luni" block, which holds
  the layer's Unicode (UTF-16BE) name.  Only ASCII names are extracted: each
  character must have a zero high byte.  Parsing stops at the first "luni"
  block or when the remaining data is too short to hold another header.
*/
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
  char
    key[5];

  size_t
    remaining_length;

  unsigned char
    *p;

  unsigned int
    size;

  p=GetStringInfoDatum(layer_info->info);
  remaining_length=GetStringInfoLength(layer_info->info);
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* 32-bit big-endian payload size */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      break;
    if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
      {
        unsigned char
          *name;

        unsigned int
          length;

        /* UTF-16 code-unit count, 32-bit big-endian */
        length=(unsigned int) (*p++) << 24;
        length|=(unsigned int) (*p++) << 16;
        length|=(unsigned int) (*p++) << 8;
        length|=(unsigned int) (*p++);
        /* NOTE(review): if size < 4 this unsigned subtraction wraps and the
           check passes — confirm the 4 length bytes cannot over-read here. */
        if (length * 2 > size - 4)
          break;
        if (sizeof(layer_info->name) <= length)
          break;
        name=layer_info->name;
        while (length > 0)
        {
          /* Only ASCII strings are supported */
          if (*p++ != '\0')
            break;
          *name++=*p++;
          length--;
        }
        /* Terminate only if every character was ASCII. */
        if (length == 0)
          *name='\0';
        break;
      }
    else
      p+=size;
    remaining_length-=(size_t) size;
  }
}
/*
  Determine the byte size of the layer info section.  A non-zero size read
  directly from the blob is returned as-is; otherwise the additional "8BIM"
  keyed blocks are inspected: "Mt16"/"Mt32"/"Mtrn" indicate merged-image
  transparency (enabling alpha), and "Lr16"/"Lr32" carry the 16/32-bit layer
  data whose size is then read.  Returns 0 when no layer data is found.
*/
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  (void) ReadBlobLong(image);  /* skip the global layer mask info length */
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      /* Transparency block: must be empty, marks the image as having alpha. */
      size=GetPSDSize(psd_info,image);
      if (size != 0)
        return(0);
      image->alpha_trait=BlendPixelTrait;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}
/*
  Parse the PSD layer records and decode each layer's image data.  The
  on-disk order is: layer count, then per layer a header (bounds, channel
  table, blend key, opacity, clipping, flags) followed by optional mask,
  blending-range, name and additional-info sections, and finally the pixel
  data of every layer in sequence.  When skip_layers is set only the layer
  count is consumed (to learn whether the merged result carries alpha).
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  ssize_t
    count,
    index,
    i,
    j,
    number_layers;

  size=GetLayerInfoSize(psd_info,image);
  if (size == 0)
    {
      CheckMergedImageAlpha(psd_info,image);
      return(MagickTrue);
    }
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: read every layer record (no pixel data yet).
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "  reading layer #%.20g",(double) i+1);
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "    offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    /* Per-channel table: channel type and stored data size. */
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].supported=GetPixelChannelFromPsdIndex(
        psd_info,(ssize_t) ReadBlobSignedShort(image),
        &layer_info[i].channel_info[j].channel);
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "    channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].channel,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "  layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,layer_info[i].blendkey,4);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* Flag bit 0x02 marks the layer as hidden. */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "   blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image);  /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "    layer contains additional info");
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* Flag 0x01: mask position is relative to the layer. */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "      layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
            */
            /* NOTE(review): length < 18 would wrap this unsigned
               subtraction — confirm length is validated upstream. */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "      layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "      layer name: %s",layer_info[i].name);
        /* The name field is padded to a multiple of 4 bytes. */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* Whatever remains of this record is "additional info" blocks. */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
            ParseAdditionalInfo(&layer_info[i]);
          }
      }
  }
  /*
    Pass 2: allocate an image per non-empty layer.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "      layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "  allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      if (layer_info[i].channel_info[j].channel == AlphaPixelChannel)
        {
          layer_info[i].image->alpha_trait=BlendPixelTrait;
          break;
        }
    }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Pass 3: decode the pixel data of each layer (or skip it).
  */
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
      {
        /* Skipped layer: still consume its channel data from the blob. */
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "  reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
/*
  Public entry point for reading the PSD layer block.  When coder policy
  denies read rights for PSD, report success without reading anything.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  Read the flattened ("merged") composite image stored at the end of the
  file for compatibility with old readers.  Only Raw and RLE compression are
  supported here; channels are stored planar, one after the other.  Skipped
  entirely when the user requested a scene range that excludes scene 0.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    i;

  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE: one row-size entry per row, for every channel. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  SetPsdMetaChannels(image,psd_info,psd_info->channels,exception);
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    PixelChannel
      channel;

    status=GetPixelChannelFromPsdIndex(psd_info,i,&channel);
    if (status == MagickFalse)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          CorruptImageError,"MaximumChannelsExceeded","'%.20g'",(double) i);
        break;
      }
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,channel,sizes+(i*image->rows),exception);
    else
      status=ReadPSDChannelRaw(image,channel,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
/*
  Read a PSD/PSB image: parse the file header ("8BPS", version 1 or 2),
  colormap, image resource blocks, layer and mask section, and finally the
  precombined (merged) image.  When no usable merged image exists, the
  decoded layers are flattened to synthesize one.  Returns the first image
  in the resulting list, or NULL on failure.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  ssize_t
    i;

  size_t
    image_list_length;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  /* Version 1 is PSD, version 2 is PSB (large document). */
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* PSD (version 1) caps dimensions at 30000; PSB allows more. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /* Derive colorspace and minimum channel count from the PSD mode. */
  psd_info.min_channels=3;
  switch (psd_info.mode)
  {
    case LabMode:
    {
      (void) SetImageColorspace(image,LabColorspace,exception);
      break;
    }
    case CMYKMode:
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace,exception);
      break;
    }
    case BitmapMode:
    case GrayscaleMode:
    case DuotoneMode:
    {
      if (psd_info.depth != 32)
        {
          status=AcquireImageColormap(image,MagickMin((size_t)
            (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
          if (status == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              "  Image colormap allocated");
        }
      psd_info.min_channels=1;
      (void) SetImageColorspace(image,GRAYColorspace,exception);
      break;
    }
    case IndexedMode:
    {
      psd_info.min_channels=1;
      break;
    }
    case MultichannelMode:
    {
      if ((psd_info.channels > 0) && (psd_info.channels < 3))
        {
          psd_info.min_channels=psd_info.channels;
          (void) SetImageColorspace(image,GRAYColorspace,exception);
        }
      break;
    }
  }
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data;  the format of this data is undocumented.
            32 bits per pixel;  the colormap is ignored.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap.
          */
          /* Colormap is stored planar: all reds, then greens, then blues. */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  psd_info.has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  /* Only the composite is wanted: skip decoding the individual layers. */
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (psd_info.has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  reading the precombined layer");
  image_list_length=GetImageListLength(image);
  if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1))
    psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
      image_info,image,&psd_info,exception);
  /* No merged image and no layers decoded yet: go back and read layers. */
  if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) &&
      (length != 0))
    {
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      image_list_length=GetImageListLength(image);
    }
  if (psd_info.has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (image_list_length == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /* Synthesize the merged image by flattening the decoded layers. */
      image->background_color.alpha=(MagickRealType) TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(image,exception);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      if (merged == (Image *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      const char
        *option;

      Image
        *next;

      MagickBooleanType
        replicate_profile;

      /* Attach the resource-block profile to the first kept scene, or to
         every scene when psd:replicate-profile is set. */
      option=GetImageOption(image_info,"psd:replicate-profile");
      replicate_profile=IsStringTrue(option);
      i=0;
      next=image;
      while (next != (Image *) NULL)
      {
        if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
          {
            (void) SetImageProfile(next,GetStringInfoName(profile),profile,
              exception);
            if (replicate_profile == MagickFalse)
              break;
          }
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *magick_info;

  /*
    Register the large-document (PSB) variant of the coder.
  */
  magick_info=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
  magick_info->decoder=(DecodeImageHandler *) ReadPSDImage;
  magick_info->encoder=(EncodeImageHandler *) WritePSDImage;
  magick_info->magick=(IsImageFormatHandler *) IsPSD;
  magick_info->flags|=CoderDecoderSeekableStreamFlag;
  magick_info->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(magick_info);
  /*
    Register the classic Photoshop (PSD) variant.
  */
  magick_info=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
  magick_info->decoder=(DecodeImageHandler *) ReadPSDImage;
  magick_info->encoder=(EncodeImageHandler *) WritePSDImage;
  magick_info->magick=(IsImageFormatHandler *) IsPSD;
  magick_info->flags|=CoderDecoderSeekableStreamFlag;
  magick_info->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(magick_info);
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  /*
    Drop both format registrations added by RegisterPSDImage().
  */
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Write a row-offset entry whose width depends on the file version:
    version 1 (PSD) uses 16-bit values, version 2 (PSB) uses 32-bit.
  */
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Seek back to "offset", patch in the (version-width) value, then
    restore the original blob position.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Write a length field: 32-bit for version 1 (PSD), 64-bit for
    version 2 (PSB).
  */
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    saved_position;

  ssize_t
    count;

  /*
    Patch a previously reserved size field at "offset", then return the
    blob to the position it had on entry.
  */
  saved_position=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,saved_position,SEEK_SET);
  return(count);
}
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  ssize_t
    i,
    j;

  unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.

    PackBits control bytes: a value n < 128 means "copy the next n+1
    literal bytes"; a value n > 128 means "repeat the next byte 257-n
    times"; 128 marks end-of-data.  Returns the number of bytes written
    into compact_pixels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /*
    Scratch buffer for one literal run: 1 control byte + up to 127 literals.
  */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* One byte left: emit as a literal run of one. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two bytes left: literal run of two. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        /* Three bytes left: packed run if all equal, else literal run. */
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            /* Extend the run as far as possible, capped at 127. */
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Collect literals until three identical bytes start a packed run,
           capped at 127 literals or within 3 bytes of the buffer end. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
          (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128; /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  /*
    Emit the two-byte compression marker; for RLE, also reserve one
    per-row byte-count slot per channel (patched later as each row is
    compressed).  Returns the number of bytes written.
  */
  if (compression == RLECompression)
    {
      count=(size_t) WriteBlobShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    return((size_t) WriteBlobShort(image,ZipWithoutPrediction));
#endif
  return((size_t) WriteBlobShort(image,Raw));
}
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  const Quantum
    *p;

  ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  /*
    Write one channel of next_image to the blob, row by row, with the
    requested compression (Raw, RLE, or Zip when zlib is available).
    Returns the number of bytes written, or 0 on failure.
  */
  count=0;
  if (separate != MagickFalse)
    {
      /* Per-layer channel: write its own compression marker first and
         point size_offset at the RLE row-count table that follows it. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* Map -quality 1..9 onto the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    if (monochrome != MagickFalse)
      /* 1-bit output is bit-inverted before writing. */
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this row's compressed byte count into the RLE table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;  /* last row: flush the deflate stream */
        do {
          stream.avail_out=(uInt) MagickMinBufferExtent;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) MagickMinBufferExtent-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  /*
    Allocate a scratch buffer large enough to hold one PackBits-encoded
    row; 16-bit samples double the packet size.  Returns NULL (with an
    exception recorded) on allocation failure.
  */
  packet_size=(image->depth > 8UL) ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError,
      "MemoryAllocationFailed","`%s'",image->filename);
  return(compact_pixels);
}
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  /*
    Write every channel of next_image (indexes, gray, RGB or CMYK, plus
    alpha), followed by an optional opacity-mask channel.  When separate
    is set, each channel carries its own compression header and its byte
    size is patched into the layer record at size_offset; otherwise one
    combined compression header is written (merged-image section).
    Returns the number of bytes written, or 0 on failure.
  */
  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Width of one channel's RLE row table; used to advance rows_offset
         between channels in the combined case. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* CMYK samples are negated before writing and restored below. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);  /* restore the in-memory image */
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* Write the registered opacity mask as an extra channel. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  ssize_t
    pad;

  /*
    Write a Pascal string (length byte followed by characters), truncated
    to 255 characters and zero-padded so the total size (including the
    length byte) is a multiple of "padding".  Returns bytes written.
  */
  length=strlen(value);
  if (length > 255UL)
    length=255UL;
  count=0;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;
  if ((length % padding) != 0)
    for (pad=(ssize_t) (padding-(length % padding)); pad > 0; pad--)
      count+=WriteBlobByte(image,0);
  return(count);
}
static void WriteResolutionResourceBlock(Image *image)
{
  double
    horizontal,
    vertical;

  unsigned short
    unit;

  /*
    Emit the 8BIM resolution resource (id 0x03ED): resolutions as 16.16
    fixed-point values plus their display units.
  */
  if (image->units == PixelsPerCentimeterResolution)
    {
      horizontal=2.54*65536.0*image->resolution.x+0.5;
      vertical=2.54*65536.0*image->resolution.y+0.5;
      unit=2;
    }
  else
    {
      horizontal=65536.0*image->resolution.x+0.5;
      vertical=65536.0*image->resolution.y+0.5;
      unit=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (horizontal+0.5));
  (void) WriteBlobMSBShort(image,unit); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,unit); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (vertical+0.5));
  (void) WriteBlobMSBShort(image,unit); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,unit); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    bytes_written;

  /*
    Write one channel-info entry: the channel id followed by a zero size
    placeholder that is patched after the channel data has been written.
    (The two writes must stay sequential statements to keep their order.)
  */
  bytes_written=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  bytes_written+=SetPSDSize(psd_info,image,0);
  return(bytes_written);
}
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  /*
    Remove the ICC profile resource (id 0x040f) from a caller-supplied
    8BIM resource block, since WritePSDImage() writes the icc profile as
    its own resource; keeping both would duplicate it.  Modifies
    bim_profile in place.
  */
  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Resource header: 4-byte signature, 2-byte id, 2-byte name field,
       4-byte data length. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Total entry size: header (12) plus even-padded data length. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* resource data is padded to an even byte count */
  }
}
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  /*
    Remove the resolution resource (id 0x03ed) from a caller-supplied
    8BIM resource block; WriteResolutionResourceBlock() writes a fresh
    one.  Modifies bim_profile in place.
  */
  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* Resource header: 4-byte signature, 2-byte id, 2-byte name field,
       4-byte data length. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Splice the entry out of the buffer and shrink the profile. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* resource data is padded to an even byte count */
  }
}
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  /*
    Filter the "psd:additional-info" profile according to the
    psd:additional-info option: "all" keeps it verbatim, "selective"
    keeps only whitelisted entries (rewriting the profile in place), any
    other value drops the profile entirely.  Returns the profile to
    append to the layer record, or NULL.
  */
  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* Each entry: 4-byte signature, 4-byte key, 4-byte big-endian size,
     then "size" bytes of data. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);  /* truncated entry: bail out */
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Unlisted key: splice this entry out of the datum in place. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  /*
    Write the layer-info section: a layer count, one record per layer
    (bounds, channel table, blend mode, opacity, mask, name, additional
    info), then each layer's channel data.  The section length reserved
    at the start is patched at the end; the total (unrounded) size is
    also returned through layers_size when non-NULL.
  */
  status=MagickTrue;
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  /* Reserve the section-length field; patched below via WritePSDSize. */
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* Per the PSD spec a negative layer count flags that the first alpha
     channel contains the merged-image transparency. */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer bounds: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    layer_size_offsets[layer_index++]=TellBlob(image);
    /* Channel table: ids 0..n-1, -1 for alpha, -2 for the user mask; the
       size placeholders are patched after the channel data is written. */
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* No label: synthesize a name like "L1", "L2", ... */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data length: padded Pascal name + mask record + additional
       info + the two fixed length fields. */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobLong(image,0);
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* The recorded section length is rounded up to an even byte count. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for writing the layer section: consult the coder
    security policy first, then delegate to WritePSDLayersInternal()
    without requesting the total section size.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
    exception));
}
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  ssize_t
    i;

  size_t
    length,
    num_channels;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /*
    Write the file header; version 2 (PSB) is used for the large-document
    format or when either dimension exceeds 30000 pixels.
  */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
  /* Determine the channel count from the colorspace/class/alpha. */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) &&
        (image_info->type != TrueColorAlphaType) &&
        (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);  /* empty color-mode data section */
  else
    {
      /*
        Write PSD raster colormap.
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* Work on a clone: strip resources we rewrite ourselves
         (resolution, and the icc profile when present) to avoid
         duplicates in the output. */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* Write the icc profile as its own 8BIM resource (id 0x040F). */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      /* Pad to an even byte count. */
      if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      const char
        *option;

      CompressionType
        compression;

      MagickOffsetType
        size_offset;

      size_t
        size;

      /* Reserve the layer/mask-section length; patched after the layers
         are written. */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      option=GetImageOption(image_info,"psd:write-layers");
      if (IsStringFalse(option) != MagickTrue)
        {
          status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
            exception);
          (void) WritePSDSize(&psd_info,image,size+
            (psd_info.version == 1 ? 8 : 12),size_offset);
          (void) WriteBlobMSBLong(image,0); /* user mask data */
        }
      /*
        Write composite image.
      */
      compression=image->compression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      if (image->compression == ZipCompression)
        image->compression=RLECompression;  /* Zip falls back to RLE here — presumably unsupported for the composite */
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
        exception) == 0)
        status=MagickFalse;
      image->compression=compression;  /* restore caller-visible setting */
    }
  (void) CloseBlob(image);
  return(status);
}
|
sdf.h | /*
* Generate SDF
* by R. Falque
* 11/02/2020
*/
#ifndef SDF_H
#define SDF_H
#include <cmath>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#include <Eigen/Core>
#include <unsupported/Eigen/CXX11/Tensor>

#include "EigenTools/getMinMax.h"
#include "EigenTools/nanoflannWrapper.h"
#include "sgn.h"
#include "IO/writePNG.h"
#include "IO/process_folder.h"
// polyscope wrapper
class SDF
{
private:
Eigen::MatrixXd vertices_;
Eigen::MatrixXd normals_;
int grid_resolution_;
double bounding_box_scale_;
Eigen::Tensor<double, 3> SDF_;
double grid_size_;
Eigen::Vector3d source_;
public:
SDF(Eigen::MatrixXd & vertices, Eigen::MatrixXd & normals, int grid_resolution, double bounding_box_scale)
{
vertices_ = vertices;
normals_ = normals;
grid_resolution_ = grid_resolution;
bounding_box_scale_ = bounding_box_scale;
init();
}
// destructor
~SDF()
{
}
//accessors
inline Eigen::Tensor<double, 3> get_SDF(){return SDF_;};
inline double get_grid_size(){return grid_size_;};
inline Eigen::Vector3d get_source(){return source_;};
void init() {
Eigen::Vector3d min_point, max_point;
getMinMax(vertices_, min_point, max_point);
//double bounding_box_size = (max_point - min_point).norm() * bounding_box_scale;
double bounding_box_size = (max_point - min_point).maxCoeff() * bounding_box_scale_; // diagonal versus max direction
double leaf_size = bounding_box_size/(grid_resolution_-1);
double inv_leaf_size = 1.0/leaf_size;
Eigen::Vector3i min_box, max_box, number_of_bins;
min_box << floor(min_point(0)*inv_leaf_size), floor(min_point(1)*inv_leaf_size) , floor(min_point(2)*inv_leaf_size);
max_box << floor(max_point(0)*inv_leaf_size), floor(max_point(1)*inv_leaf_size) , floor(max_point(2)*inv_leaf_size);
number_of_bins << max_box(0) - min_box(0) + 1, max_box(1) - min_box(1) + 1, max_box(2) - min_box(2) + 1;
SDF_.resize(number_of_bins(0), number_of_bins(1), number_of_bins(2));
nanoflann_wrapper tree(vertices_);
for (int x = 0; x < number_of_bins(0); ++x)
for (int y = 0; y < number_of_bins(1); ++y)
{
#pragma omp parallel for
for (int z = 0; z < number_of_bins(2); ++z)
{
std::vector< int > closest_point;
Eigen::Vector3d point;
point << x, y, z;
point *= leaf_size;
point += min_point;
closest_point = tree.return_k_closest_points(point, 1);
double sign = ( vertices_.col(closest_point[0]) - point ).dot( normals_.col(closest_point[0]) );
sign /= abs(sign);
SDF_(x, y, z) = ( vertices_.col(closest_point[0]) - point ).norm() * sign;
}
}
grid_size_ = leaf_size;
source_ = min_point;
}
inline bool generate_graph(Eigen::MatrixXd & vertices, Eigen::MatrixXi & edges)
{
std::vector< Eigen::Vector3d > vertices_vector;
std::vector< Eigen::Vector2i > edges_vector;
Eigen::Vector3d centroid;
Eigen::Tensor<int, 3> grid_indices(SDF_.dimension(0), SDF_.dimension(1), SDF_.dimension(2));
// build vertices
for (int x = 0; x < SDF_.dimension(0); ++x)
for (int y = 0; y < SDF_.dimension(1); ++y)
for (int z = 0; z < SDF_.dimension(2); ++z) {
centroid << x, y, z;
centroid *= grid_size_;
centroid += source_;
vertices_vector.push_back(centroid);
grid_indices(x, y, z) = vertices_vector.size();
}
// build edges
for (int x = 0; x < SDF_.dimension(0); ++x)
for (int y = 0; y < SDF_.dimension(1); ++y)
for (int z = 0; z < SDF_.dimension(2); ++z)
{
// case on x
if (x-1>=0) {
Eigen::Vector2i edge_temp;
edge_temp << grid_indices(x, y, z), grid_indices(x-1, y, z);
edges_vector.push_back(edge_temp);
}
if (x+1<SDF_.dimension(0)) {
Eigen::Vector2i edge_temp;
edge_temp << grid_indices(x, y, z), grid_indices(x+1, y, z);
edges_vector.push_back(edge_temp);
}
// case on y
if (y-1>=0) {
Eigen::Vector2i edge_temp;
edge_temp << grid_indices(x, y, z), grid_indices(x, y-1, z);
edges_vector.push_back(edge_temp);
}
if (y+1<SDF_.dimension(1)) {
Eigen::Vector2i edge_temp;
edge_temp << grid_indices(x, y, z), grid_indices(x, y+1, z);
edges_vector.push_back(edge_temp);
}
// case on z
if (z-1>=0) {
Eigen::Vector2i edge_temp;
edge_temp << grid_indices(x, y, z), grid_indices(x, y, z-1);
edges_vector.push_back(edge_temp);
}
if (z+1<SDF_.dimension(2)) {
Eigen::Vector2i edge_temp;
edge_temp << grid_indices(x, y, z), grid_indices(x, y, z+1);
edges_vector.push_back(edge_temp);
}
}
vertices.resize(3, vertices_vector.size());
for (int i=0; i< vertices_vector.size(); i++)
vertices.col(i) = vertices_vector[i];
edges.resize(2, edges_vector.size());
for (int i=0; i< edges_vector.size(); i++)
edges.col(i) = edges_vector[i];
return true;
};
inline bool print_to_folder(std::string folder_name)
{
bool folder_exist = does_folder_exist(folder_name);
if (!folder_exist) {
std::cout << "Error: the folder does not exist\n";
create_folder(folder_name);
}
empty_folder(folder_name);
#pragma omp parallel for
for (int i=0; i<SDF_.dimension(2); i++) {
Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> slice;
Eigen::Tensor<double, 2> tensor_slice;
Eigen::array<long int,3> offset = {0,0,i}; //Starting point
Eigen::array<long int,3> extent = {SDF_.dimension(0),SDF_.dimension(1),0}; //Finish point
tensor_slice = SDF_.slice(offset, extent).reshape(Eigen::array<long int,2>{SDF_.dimension(0),SDF_.dimension(1)});
slice = Eigen::Map<const Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>> (tensor_slice.data(), tensor_slice.dimension(0),tensor_slice.dimension(1));
//slice /= slice.maxCoeff() ;
Eigen::MatrixXd R, G, B;
R = slice;
R = R.cwiseMax(0);
R /= R.maxCoeff();
G = -slice;
G = G.cwiseMax(0);
G /= G.maxCoeff();
B = G;
std::stringstream ss;
ss << std::setw(3) << std::setfill('0') << i;
std::string s = ss.str();
std::string file_name = folder_name + s + ".png";
writePNG(R, G, B, file_name);
}
std::cout << "Progress: Stack of images written in :" << folder_name << std::endl;
return true;
};
};
#endif |
GB_unaryop__ainv_bool_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_bool_uint16
// op(A') function: GB_tran__ainv_bool_uint16
// C type: bool
// A type: uint16_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
// type of the A (input) matrix entries
#define GB_ATYPE \
uint16_t

// type of the C (output) matrix entries
#define GB_CTYPE \
bool

// aij = Ax [pA]: read one entry of A
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]

// access entry p of the C value array
#define GB_CX(p) Cx [p]

// unary operator: per the operator definition above, cij = aij (identity)
#define GB_OP(z, x) \
z = x ;

// casting from the A type (uint16_t) to the C type (bool)
#define GB_CASTING(z, x) \
bool z = (bool) x ;

// cij = op (cast (aij)): read, cast, apply the operator, and store
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for p in 0..anz-1.
// Returns GrB_NO_VALUE when this kernel is compiled out (see GB_DISABLE).
GrB_Info GB_unop__ainv_bool_uint16
(
bool *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// each entry is independent, so the loop parallelizes with a static schedule
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The actual loop body lives in the shared template GB_unaryop_transpose.c,
// which expands the GB_* macros defined above; this wrapper only selects
// phase 2 of the two-phase transpose.
GrB_Info GB_tran__ainv_bool_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
syr2k.c | /**
* syr2k.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.10

/* Problem size: RUN_TEST selects the validated size, RUN_BENCHMARK the
   large benchmark size; the default is 1000.  N == M in every case. */
#ifdef RUN_TEST
#define SIZE 1100
/* BUGFIX: use defined() -- a bare `#elif RUN_BENCHMARK` evaluates the macro
   as an arithmetic expression, which errors out when it is defined empty
   and misselects when it is defined as 0. */
#elif defined(RUN_BENCHMARK)
#define SIZE 9600
#else
#define SIZE 1000
#endif

#define N SIZE
#define M SIZE

/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0)
 */
#define ALPHA 12435
#define BETA 4546

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/*
 * Initialize the inputs with deterministic values so the CPU and GPU
 * runs can be compared.  C is N x N; A and B are N x M, row-major.
 */
void init_arrays(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) {
  int i, j;

  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      C[i * N + j] = ((DATA_TYPE)i * j + 2) / N;
    }
    /* BUGFIX (latent): index A/B with the row stride M, matching how
       syr2k()/syr2k_OMP() read them (i*M + k).  The original used i*N + j,
       which only worked because N == M; the generated values are identical
       under the current configuration. */
    for (j = 0; j < M; j++) {
      A[i * M + j] = ((DATA_TYPE)i * j) / N;
      B[i * M + j] = ((DATA_TYPE)i * j + 1) / N;
    }
  }
}
/*
 * Reference (sequential) symmetric rank-2k update:
 *   C = ALPHA * (A * B^T + B * A^T) + BETA * C
 * with A, B of size N x M and C of size N x N, all row-major.
 * Statement order matches the GPU kernel so float results agree bitwise.
 */
void syr2k(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) {
  int row, col, t;

  /* scale the accumulator first */
  for (row = 0; row < N; row++)
    for (col = 0; col < N; col++)
      C[row * N + col] *= BETA;

  /* rank-2k accumulation */
  for (row = 0; row < N; row++)
    for (col = 0; col < N; col++)
      for (t = 0; t < M; t++) {
        C[row * N + col] += ALPHA * A[row * M + t] * B[col * M + t];
        C[row * N + col] += ALPHA * B[row * M + t] * A[col * M + t];
      }
}
/*
 * GPU (OpenMP 4.0 target offload) version of syr2k.
 *
 * Cinit holds the initial C values: it is scaled by BETA in place on the
 * host (NOTE: this clobbers the caller's Cinit), mapped to the device as
 * input, and the final result is written into C, so the device never
 * needs C's previous contents.
 *
 * Cleanup vs. the original: removed the unused local `sum` and the large
 * commented-out host-parallel variant of the kernel.
 */
void syr2k_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *Cinit) {
  /* host-side scaling: Cinit <- BETA * Cinit */
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Cinit[i * N + j] *= BETA;
    }
  }

#pragma omp target map(to : A[ : N *M],                                        \
                       B[ : N *M], Cinit[ : N *N])                             \
    map(from : C[ : N *N]) device(DEVICE_ID)
#pragma omp parallel for
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      C[i * N + j] = Cinit[i * N + j];
      for (int k = 0; k < M; k++) {
        C[i * N + j] += ALPHA * A[i * M + k] * B[j * M + k];
        C[i * N + j] += ALPHA * B[i * M + k] * A[j * M + k];
      }
    }
  }
}
/*
 * Count entries of C and C_Gpu whose percent difference exceeds the
 * threshold; print a summary and return the mismatch count.
 */
int compareResults(DATA_TYPE *C, DATA_TYPE *C_Gpu) {
  int mismatches = 0;

  for (int r = 0; r < N; r++)
    for (int c = 0; c < N; c++)
      if (percentDiff(C[r * N + c], C_Gpu[r * N + c]) >
          PERCENT_DIFF_ERROR_THRESHOLD)
        mismatches++;

  /* print results */
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         PERCENT_DIFF_ERROR_THRESHOLD, mismatches);
  return mismatches;
}
/*
 * Driver: run the offloaded syr2k, optionally validate against the
 * sequential CPU version (RUN_TEST builds), and return the number of
 * mismatching entries (0 on success).
 */
int main() {
  double t_start, t_end;
  int fail = 0;
  DATA_TYPE *A;
  DATA_TYPE *B;
  DATA_TYPE *C;
  DATA_TYPE *Cinit;
  DATA_TYPE *C_Gpu;

  A = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
  B = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
  C = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
  Cinit = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
  C_Gpu = (DATA_TYPE *)calloc(N * M, sizeof(DATA_TYPE));

  fprintf(stdout, "<< Symmetric rank-2k operations >>\n");

  /* syr2k_OMP scales Cinit in place, so it would need re-initialization
     before any further use. */
  init_arrays(A, B, Cinit);

  t_start = rtclock();
  syr2k_OMP(A, B, C_Gpu, Cinit);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

#ifdef RUN_TEST
  init_arrays(A, B, C);
  t_start = rtclock();
  syr2k(A, B, C);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  fail = compareResults(C, C_Gpu);
#endif

  free(A);
  free(B);
  free(C);
  free(Cinit); /* BUGFIX: Cinit was allocated but never freed (leak) */
  free(C_Gpu);

  return fail;
}
|
wave1d_b.c | /* Generated by TAPENADE (INRIA, Ecuador team)
Tapenade 3.14 (r7079) - 5 Oct 2018 09:55
*/
#include <adBuffer.h>
/*
Differentiation of wave1d in reverse (adjoint) mode:
gradient of useful results: *u *u_1 *u_2
with respect to varying inputs: *u *u_1 *u_2
RW status of diff variables: *u:in-out *u_1:incr *u_2:incr
Plus diff mem management of: u:in u_1:in u_2:in
*/
/*
 * Reverse (adjoint) sweep for one step of a 1-D wave update, generated by
 * Tapenade (see header above).  Accumulates into the adjoint arrays u_1b
 * and u_2b from the incoming adjoint ub; u, u_1, u_2 are the primal
 * arrays (unused here), c the per-point coefficient, D a scalar factor,
 * n the array length.  Judging from the adjoint statements, the primal
 * appears to be u[i] = 2*u_1[i] - u_2[i] + D*c[i]*(u_1[i-1] - 2*u_1[i] +
 * u_1[i+1]) -- TODO confirm against wave1d().
 */
void wave1d_b(double *u, double *ub, double *u_1, double *u_1b, double *u_2,
              double *u_2b, double *c, double D, int n) {
  int i;
  double tempb;
  /* Interior points only (i = 1..n-2).  Iterations write to u_1b[i-1] and
     u_1b[i+1], so neighboring iterations touch the same elements: the
     atomics below make the parallel accumulation safe.  u_2b[i] is unique
     per iteration and needs no atomic. */
#pragma omp parallel for private(tempb)
  for (i = n-2; i > 0; --i) {
    /* shared factor of the three u_1b contributions */
    tempb = D*c[i]*ub[i];
#pragma omp atomic
    u_1b[i - 1] = u_1b[i - 1] + tempb;
#pragma omp atomic
    u_1b[i] = u_1b[i] + 2.0*ub[i] - 2*tempb;
#pragma omp atomic
    u_1b[i + 1] = u_1b[i + 1] + tempb;
    u_2b[i] = u_2b[i] - ub[i];
  }
}
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/composite-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% so that its gamma is set appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image)
% MagickBooleanType AutoGammaImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o channel: The channels to auto-level. If the special 'SyncChannels'
% flag is set all given channels is adjusted in the same way using the
% mean average of those channels.
%
*/
/*
  Convenience wrapper: auto-gamma all default channels.
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image)
{
  MagickBooleanType
    status;

  status=AutoGammaImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType AutoGammaImageChannel(Image *image,
const ChannelType channel)
{
MagickStatusType
status;
double
mean,sans,gamma,logmean;
logmean=log(0.5);
if ((channel & SyncChannels) != 0 )
{
/*
Apply gamma correction equally accross all given channels
*/
(void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception);
gamma=log(mean*QuantumScale)/logmean;
return LevelImageChannel(image, channel,
0.0, (double)QuantumRange, gamma);
}
/*
auto-gamma each channel separateally
*/
status = MagickTrue;
if ((channel & RedChannel) != 0)
{
(void) GetImageChannelMean(image,RedChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status = status && LevelImageChannel(image, RedChannel,
0.0, (double)QuantumRange, gamma);
}
if ((channel & GreenChannel) != 0)
{
(void) GetImageChannelMean(image,GreenChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status = status && LevelImageChannel(image, GreenChannel,
0.0, (double)QuantumRange, gamma);
}
if ((channel & BlueChannel) != 0)
{
(void) GetImageChannelMean(image,BlueChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status = status && LevelImageChannel(image, BlueChannel,
0.0, (double)QuantumRange, gamma);
}
if (((channel & OpacityChannel) != 0) &&
(image->matte == MagickTrue))
{
(void) GetImageChannelMean(image,OpacityChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status = status && LevelImageChannel(image, OpacityChannel,
0.0, (double)QuantumRange, gamma);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
(void) GetImageChannelMean(image,IndexChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status = status && LevelImageChannel(image, IndexChannel,
0.0, (double)QuantumRange, gamma);
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image)
% MagickBooleanType AutoLevelImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o channel: The channels to auto-level. If the special 'SyncChannels'
% flag is set the min/max/mean value of all given channels is used for
% all given channels, to all channels in the same way.
%
*/
/*
  Convenience wrapper: auto-level all default channels.
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image)
{
  MagickBooleanType
    status;

  status=AutoLevelImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType AutoLevelImageChannel(Image *image,
  const ChannelType channel)
{
  /*
    Auto-level is a min/max histogram stretch with no black- or
    white-point clipping.
  */
  return(MinMaxStretchImage(image,channel,0.0,0.0));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast)
% MagickBooleanType BrightnessContrastImageChannel(Image *image,
% const ChannelType channel,const double brightness,
% const double contrast)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
*/
/*
  Convenience wrapper: adjust brightness/contrast of all default channels.
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  return(BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast));
}
MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
  const ChannelType channel,const double brightness,const double contrast)
{
#define BrightnessContastImageTag  "BrightnessContast/Image"

  double
    coefficients[2],
    intercept,
    slope;

  /*
    Convert the brightness/contrast percentages (-100 .. 100) into the
    slope and intercept of a linear transfer function, then apply it as
    a degree-1 polynomial.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  slope=tan((double) (MagickPI*(contrast/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  return(FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
    &image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
*/
/*
  ColorDecisionListImage() parses a Color Correction Collection (CCC) XML
  document and applies its slope/offset/power (SOP) and saturation (SAT)
  corrections to the image.  Returns MagickFalse if the XML cannot be
  parsed or memory is exhausted; otherwise the status of the pixel updates.
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
  const char *color_correction_collection)
{
#define ColorDecisionListCorrectImageTag  "ColorDecisionList/Image"

  typedef struct _Correction
  {
    double
      slope,
      offset,
      power;
  } Correction;

  typedef struct _ColorCorrection
  {
    Correction
      red,
      green,
      blue;

    double
      saturation;
  } ColorCorrection;

  CacheView
    *image_view;

  char
    token[MaxTextExtent];

  ColorCorrection
    color_correction;

  const char
    *content,
    *p;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelPacket
    *cdl_map;

  register ssize_t
    i;

  ssize_t
    y;

  XMLTreeInfo
    *cc,
    *ccc,
    *sat,
    *sop;

  /*
    Parse the color correction collection XML.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (color_correction_collection == (const char *) NULL)
    return(MagickFalse);
  ccc=NewXMLTree((const char *) color_correction_collection,&image->exception);
  if (ccc == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  cc=GetXMLTreeChild(ccc,"ColorCorrection");
  if (cc == (XMLTreeInfo *) NULL)
    {
      ccc=DestroyXMLTree(ccc);
      return(MagickFalse);
    }
  /*
    Defaults: identity SOP correction, zero saturation.
  */
  color_correction.red.slope=1.0;
  color_correction.red.offset=0.0;
  color_correction.red.power=1.0;
  color_correction.green.slope=1.0;
  color_correction.green.offset=0.0;
  color_correction.green.power=1.0;
  color_correction.blue.slope=1.0;
  color_correction.blue.offset=0.0;
  color_correction.blue.power=1.0;
  color_correction.saturation=0.0;
  sop=GetXMLTreeChild(cc,"SOPNode");
  if (sop != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *offset,
        *power,
        *slope;

      /*
        Each SOP element carries up to three comma/space separated values,
        one per RGB channel.
      */
      slope=GetXMLTreeChild(sop,"Slope");
      if (slope != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(slope);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.slope=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      offset=GetXMLTreeChild(sop,"Offset");
      if (offset != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(offset);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      power=GetXMLTreeChild(sop,"Power");
      if (power != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(power);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.power=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
    }
  sat=GetXMLTreeChild(cc,"SATNode");
  if (sat != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *saturation;

      saturation=GetXMLTreeChild(sat,"Saturation");
      if (saturation != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          GetMagickToken(p,&p,token);
          color_correction.saturation=StringToDouble(token,(char **) NULL);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.saturation: %g",color_correction.saturation);
    }
  /*
    Build the SOP transfer map: one output quantum per input level.
  */
  cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4)
#endif
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power)))));
    cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power)))));
    cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power)))));
  }
  if (image->storage_class == PseudoClass)
    {
      /*
        Apply transfer function to colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          luma;

        luma=0.2126*image->colormap[i].red+0.7152*image->colormap[i].green+
          0.0722*image->colormap[i].blue;
        /*
          BUGFIX: parenthesize (cdl_map[...]-luma) so the colormap path
          applies the same saturation blend as the per-pixel loop below;
          the original expression reduced to saturation*cdl_map[...]
          because the luma terms cancelled.
        */
        image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation*
          (cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma));
        image->colormap[i].green=ClampToQuantum(luma+
          color_correction.saturation*(cdl_map[ScaleQuantumToMap(
          image->colormap[i].green)].green-luma));
        image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation*
          (cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma));
      }
    }
  /*
    Apply transfer function to image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* blend each channel between its luma and its mapped value */
      luma=0.2126*GetPixelRed(q)+0.7152*GetPixelGreen(q)+
        0.0722*GetPixelBlue(q);
      SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma)));
      SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma)));
      SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ColorDecisionListImageChannel)
#endif
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertial gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will effect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image)
% MagickBooleanType ClutImageChannel(Image *image,
% const ChannelType channel,Image *clut_image)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: apply the color lookup table to the default
    channel set.
  */
  status=ClutImageChannel(image,DefaultChannels,clut_image);
  return(status);
}
MagickExport MagickBooleanType ClutImageChannel(Image *image,
  const ChannelType channel,const Image *clut_image)
{
#define ClutImageTag "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    *clut_map;

  register ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Sample the CLUT image along its diagonal into a MaxMap+1 entry table so
    each quantum value maps to an interpolated replacement color.
  */
  clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*clut_map));
  if (clut_map == (MagickPixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
  */
  status=MagickTrue;
  progress=0;
  /*
    Integer interpolation samples pixel centers directly; other methods
    interpolate between pixels, hence the one-pixel adjustment.
  */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  exception=(&image->exception);
  clut_view=AcquireCacheView(clut_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4)
#endif
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetMagickPixelPacket(clut_image,clut_map+i);
    (void) InterpolateMagickPixelPacket(clut_image,clut_view,
      UndefinedInterpolatePixel,QuantumScale*i*(clut_image->columns-adjust),
      QuantumScale*i*(clut_image->rows-adjust),clut_map+i,exception);
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixelRed(clut_map+
          ScaleQuantumToMap(GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixelGreen(clut_map+
          ScaleQuantumToMap(GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixelBlue(clut_map+
          ScaleQuantumToMap(GetPixelBlue(q))));
      if ((channel & OpacityChannel) != 0)
        {
          /*
            When only one side has a matte channel, treat the CLUT (or
            the image intensity) as a gray-scale alpha adjustment.
          */
          if (clut_image->matte == MagickFalse)
            SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+
              ScaleQuantumToMap((Quantum) GetPixelAlpha(q))));
          else
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,ClampPixelOpacity(clut_map+
                ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel))));
            else
              SetPixelOpacity(q,ClampPixelOpacity(
                clut_map+ScaleQuantumToMap(GetPixelOpacity(q))));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        /*
          FIX: scale the quantum to the map range before indexing, as done
          for every other channel above; indexing clut_map with the raw
          quantum reads out of bounds when the quantum range exceeds MaxMap.
        */
        SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t)
          ScaleQuantumToMap(GetPixelIndex(indexes+x)))->index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ClutImageChannel)
#endif
        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map);
  /*
    Enable the alpha channel if the CLUT carried transparency.
  */
  if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to a MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
*/
static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation,
    target;

  /*
    Nudge brightness toward a sigmoidal (sine-based) curve: dark colors
    become darker and light colors become lighter when sign > 0; sign < 0
    reverses the effect.  Hue and saturation are preserved.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  target=0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0);
  brightness+=0.5*sign*(target-brightness);
  if (brightness < 0.0)
    brightness=0.0;
  else
    if (brightness > 1.0)
      brightness=1.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen)
{
#define ContrastImageTag "Contrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* sharpen selects the direction of the sigmoidal adjustment in Contrast(). */
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
        Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
          &image->colormap[i].blue);
    }
  /*
    Contrast enhance image.  Rows are processed independently, so the loop
    may run in parallel; progress/status are the only shared variables.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      blue,
      green,
      red;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* A failed row cannot 'break' out of an OpenMP loop; skip instead. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Contrast() works on copies since it takes Quantum pointers. */
      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ContrastImage)
#endif
        proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by `stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% `enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
% MagickBooleanType ContrastStretchImage(Image *image,
% const char *levels)
% MagickBooleanType ContrastStretchImageChannel(Image *image,
% const size_t channel,const double black_point,
% const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o levels: Specify the levels where the black and white points have the
% range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const char *levels)
{
  double
    black_point,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels.  The black and white points are expressed as cumulative
    histogram pixel counts (0 .. columns*rows), optionally given as
    percentages of the total pixel count.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) image->columns*image->rows;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  if ((flags & PercentValue) != 0)
    {
      /*
        FIX: percentages scale against the total number of pixels (the
        values are compared to cumulative histogram counts in
        ContrastStretchImageChannel), not against QuantumRange.
      */
      black_point*=(double) image->columns*image->rows/100.0;
      white_point*=(double) image->columns*image->rows/100.0;
    }
  /* With no sigma given, the white point mirrors the black point. */
  if ((flags & SigmaValue) == 0)
    white_point=(double) image->columns*image->rows-black_point;
  status=ContrastStretchImageChannel(image,DefaultChannels,black_point,
    white_point);
  return(status);
}
MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point)
{
#define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    intensity;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    black,
    *histogram,
    *stretch_map,
    white;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  stretch_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*stretch_map));
  if ((histogram == (MagickPixelPacket *) NULL) ||
      (stretch_map == (MagickPixelPacket *) NULL))
    {
      /*
        FIX: release whichever allocation succeeded before throwing,
        otherwise it leaks (same pattern as EqualizeImageChannel()).
      */
      if (stretch_map != (MagickPixelPacket *) NULL)
        stretch_map=(MagickPixelPacket *) RelinquishMagickMemory(stretch_map);
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram: count, per channel, how many pixels carry each of the
    MaxMap+1 possible map values.
  */
  status=MagickTrue;
  exception=(&image->exception);
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (channel == DefaultChannels)
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        Quantum
          intensity;

        /* Default channels: histogram the luminance once for all bands. */
        intensity=PixelIntensityToQuantum(p);
        histogram[ScaleQuantumToMap(intensity)].red++;
        histogram[ScaleQuantumToMap(intensity)].green++;
        histogram[ScaleQuantumToMap(intensity)].blue++;
        histogram[ScaleQuantumToMap(intensity)].index++;
        p++;
      }
    else
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
        if ((channel & GreenChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
        if ((channel & BlueChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
        if ((channel & OpacityChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          histogram[ScaleQuantumToMap(GetPixelIndex(
            indexes+x))].index++;
        p++;
      }
  }
  /*
    Find the histogram boundaries by locating the black/white levels:
    accumulate counts from each end until the requested pixel-count
    thresholds are crossed.
  */
  black.red=0.0;
  white.red=MaxRange(QuantumRange);
  if ((channel & RedChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].red;
        if (intensity > black_point)
          break;
      }
      black.red=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].red;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.red=(MagickRealType) i;
    }
  black.green=0.0;
  white.green=MaxRange(QuantumRange);
  if ((channel & GreenChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].green;
        if (intensity > black_point)
          break;
      }
      black.green=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].green;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.green=(MagickRealType) i;
    }
  black.blue=0.0;
  white.blue=MaxRange(QuantumRange);
  if ((channel & BlueChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].blue;
        if (intensity > black_point)
          break;
      }
      black.blue=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].blue;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.blue=(MagickRealType) i;
    }
  black.opacity=0.0;
  white.opacity=MaxRange(QuantumRange);
  if ((channel & OpacityChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].opacity;
        if (intensity > black_point)
          break;
      }
      black.opacity=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].opacity;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.opacity=(MagickRealType) i;
    }
  black.index=0.0;
  white.index=MaxRange(QuantumRange);
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].index;
        if (intensity > black_point)
          break;
      }
      black.index=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].index;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.index=(MagickRealType) i;
    }
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping: values
    below the black point clamp to 0, above the white point to QuantumRange,
    and in between scale linearly.
  */
  (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & RedChannel) != 0)
      {
        if (i < (ssize_t) black.red)
          stretch_map[i].red=0.0;
        else
          if (i > (ssize_t) white.red)
            stretch_map[i].red=(MagickRealType) QuantumRange;
          else
            if (black.red != white.red)
              stretch_map[i].red=(MagickRealType) ScaleMapToQuantum(
                (MagickRealType) (MaxMap*(i-black.red)/(white.red-black.red)));
      }
    if ((channel & GreenChannel) != 0)
      {
        if (i < (ssize_t) black.green)
          stretch_map[i].green=0.0;
        else
          if (i > (ssize_t) white.green)
            stretch_map[i].green=(MagickRealType) QuantumRange;
          else
            if (black.green != white.green)
              stretch_map[i].green=(MagickRealType) ScaleMapToQuantum(
                (MagickRealType) (MaxMap*(i-black.green)/(white.green-
                black.green)));
      }
    if ((channel & BlueChannel) != 0)
      {
        if (i < (ssize_t) black.blue)
          stretch_map[i].blue=0.0;
        else
          if (i > (ssize_t) white.blue)
            stretch_map[i].blue=(MagickRealType) QuantumRange;
          else
            if (black.blue != white.blue)
              stretch_map[i].blue=(MagickRealType) ScaleMapToQuantum(
                (MagickRealType) (MaxMap*(i-black.blue)/(white.blue-
                black.blue)));
      }
    if ((channel & OpacityChannel) != 0)
      {
        if (i < (ssize_t) black.opacity)
          stretch_map[i].opacity=0.0;
        else
          if (i > (ssize_t) white.opacity)
            stretch_map[i].opacity=(MagickRealType) QuantumRange;
          else
            if (black.opacity != white.opacity)
              stretch_map[i].opacity=(MagickRealType) ScaleMapToQuantum(
                (MagickRealType) (MaxMap*(i-black.opacity)/(white.opacity-
                black.opacity)));
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      {
        if (i < (ssize_t) black.index)
          stretch_map[i].index=0.0;
        else
          if (i > (ssize_t) white.index)
            stretch_map[i].index=(MagickRealType) QuantumRange;
          else
            if (black.index != white.index)
              stretch_map[i].index=(MagickRealType) ScaleMapToQuantum(
                (MagickRealType) (MaxMap*(i-black.index)/(white.index-
                black.index)));
      }
  }
  /*
    Stretch the image.
  */
  if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace)))
    image->storage_class=DirectClass;
  if (image->storage_class == PseudoClass)
    {
      /*
        Stretch colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          {
            if (black.red != white.red)
              image->colormap[i].red=ClampToQuantum(stretch_map[
                ScaleQuantumToMap(image->colormap[i].red)].red);
          }
        if ((channel & GreenChannel) != 0)
          {
            if (black.green != white.green)
              image->colormap[i].green=ClampToQuantum(stretch_map[
                ScaleQuantumToMap(image->colormap[i].green)].green);
          }
        if ((channel & BlueChannel) != 0)
          {
            if (black.blue != white.blue)
              image->colormap[i].blue=ClampToQuantum(stretch_map[
                ScaleQuantumToMap(image->colormap[i].blue)].blue);
          }
        if ((channel & OpacityChannel) != 0)
          {
            if (black.opacity != white.opacity)
              image->colormap[i].opacity=ClampToQuantum(stretch_map[
                ScaleQuantumToMap(image->colormap[i].opacity)].opacity);
          }
      }
    }
  /*
    Stretch image.
  */
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          if (black.red != white.red)
            SetPixelRed(q,ClampToQuantum(stretch_map[
              ScaleQuantumToMap(GetPixelRed(q))].red));
        }
      if ((channel & GreenChannel) != 0)
        {
          if (black.green != white.green)
            SetPixelGreen(q,ClampToQuantum(stretch_map[
              ScaleQuantumToMap(GetPixelGreen(q))].green));
        }
      if ((channel & BlueChannel) != 0)
        {
          if (black.blue != white.blue)
            SetPixelBlue(q,ClampToQuantum(stretch_map[
              ScaleQuantumToMap(GetPixelBlue(q))].blue));
        }
      if ((channel & OpacityChannel) != 0)
        {
          if (black.opacity != white.opacity)
            SetPixelOpacity(q,ClampToQuantum(stretch_map[
              ScaleQuantumToMap(GetPixelOpacity(q))].opacity));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if (black.index != white.index)
            SetPixelIndex(indexes+x,ClampToQuantum(stretch_map[
              ScaleQuantumToMap(GetPixelIndex(indexes+x))].index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ContrastStretchImageChannel)
#endif
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(MagickPixelPacket *) RelinquishMagickMemory(stretch_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
/*
  Enhance(weight): fold the neighborhood pixel *r into the weighted average
  if its perceptual distance from the center pixel is small enough; then
  advance r.  Uses mean/distance/distance_squared/aggregate/total_weight
  from the enclosing scope.
*/
#define Enhance(weight) \
  mean=((MagickRealType) GetPixelRed(r)+pixel.red)/2; \
  distance=(MagickRealType) GetPixelRed(r)-(MagickRealType) pixel.red; \
  distance_squared=QuantumScale*(2.0*((MagickRealType) QuantumRange+1.0)+ \
    mean)*distance*distance; \
  mean=((MagickRealType) GetPixelGreen(r)+pixel.green)/2; \
  distance=(MagickRealType) GetPixelGreen(r)-(MagickRealType) \
    pixel.green; \
  distance_squared+=4.0*distance*distance; \
  mean=((MagickRealType) GetPixelBlue(r)+pixel.blue)/2; \
  distance=(MagickRealType) GetPixelBlue(r)-(MagickRealType) \
    pixel.blue; \
  distance_squared+=QuantumScale*(3.0*((MagickRealType) \
    QuantumRange+1.0)-1.0-mean)*distance*distance; \
  mean=((MagickRealType) r->opacity+pixel.opacity)/2; \
  distance=(MagickRealType) r->opacity-(MagickRealType) pixel.opacity; \
  distance_squared+=QuantumScale*(3.0*((MagickRealType) \
    QuantumRange+1.0)-1.0-mean)*distance*distance; \
  if (distance_squared < ((MagickRealType) QuantumRange*(MagickRealType) \
      QuantumRange/25.0f)) \
    { \
      aggregate.red+=(weight)*GetPixelRed(r); \
      aggregate.green+=(weight)*GetPixelGreen(r); \
      aggregate.blue+=(weight)*GetPixelBlue(r); \
      aggregate.opacity+=(weight)*GetPixelOpacity(r); \
      total_weight+=(weight); \
    } \
  r++;
#define EnhanceImageTag "Enhance/Image"

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* The 5x5 neighborhood needs at least a 5x5 image. */
  if ((image->columns < 5) || (image->rows < 5))
    return((Image *) NULL);
  enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&enhance_image->exception);
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireCacheView(image);
  enhance_view=AcquireCacheView(enhance_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /*
      Read another scan line.
    */
    if (status == MagickFalse)
      continue;
    /* Fetch 5 rows, padded 2 pixels each side, centered on row y. */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        aggregate;

      MagickRealType
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelPacket
        pixel;

      register const PixelPacket
        *restrict r;

      /*
        Compute weighted average of target pixel color components.
      */
      aggregate=zero;
      total_weight=0.0;
      /* pixel is the center of the 5x5 window (row offset 2, col offset 2). */
      r=p+2*(image->columns+4)+2;
      pixel=(*r);
      r=p;
      Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
      r=p+(image->columns+4);
      Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
      r=p+2*(image->columns+4);
      Enhance(10.0); Enhance(40.0); Enhance(80.0); Enhance(40.0); Enhance(10.0);
      r=p+3*(image->columns+4);
      Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
      r=p+4*(image->columns+4);
      Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
      /* total_weight is always > 0: the center pixel matches itself. */
      SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight);
      SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/
        total_weight);
      SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight);
      SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/
        total_weight);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_EnhanceImage)
#endif
        proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image)
% MagickBooleanType EqualizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: equalize the default channel set.
  */
  status=EqualizeImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType EqualizeImageChannel(Image *image,
const ChannelType channel)
{
#define EqualizeImageTag "Equalize/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
black,
*equalize_map,
*histogram,
intensity,
*map,
white;
register ssize_t
i;
ssize_t
y;
/*
Allocate and initialize histogram arrays.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
equalize_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*equalize_map));
histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*histogram));
map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map));
if ((equalize_map == (MagickPixelPacket *) NULL) ||
(histogram == (MagickPixelPacket *) NULL) ||
(map == (MagickPixelPacket *) NULL))
{
if (map != (MagickPixelPacket *) NULL)
map=(MagickPixelPacket *) RelinquishMagickMemory(map);
if (histogram != (MagickPixelPacket *) NULL)
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
if (equalize_map != (MagickPixelPacket *) NULL)
equalize_map=(MagickPixelPacket *) RelinquishMagickMemory(equalize_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Form histogram.
*/
(void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
exception=(&image->exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
if ((channel & GreenChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
if ((channel & BlueChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
if ((channel & OpacityChannel) != 0)
histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
p++;
}
}
/*
Integrate the histogram to get the equalization map.
*/
(void) ResetMagickMemory(&intensity,0,sizeof(intensity));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if ((channel & RedChannel) != 0)
intensity.red+=histogram[i].red;
if ((channel & GreenChannel) != 0)
intensity.green+=histogram[i].green;
if ((channel & BlueChannel) != 0)
intensity.blue+=histogram[i].blue;
if ((channel & OpacityChannel) != 0)
intensity.opacity+=histogram[i].opacity;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
intensity.index+=histogram[i].index;
map[i]=intensity;
}
black=map[0];
white=map[(int) MaxMap];
(void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (((channel & RedChannel) != 0) && (white.red != black.red))
equalize_map[i].red=(MagickRealType) ScaleMapToQuantum((MagickRealType)
((MaxMap*(map[i].red-black.red))/(white.red-black.red)));
if (((channel & GreenChannel) != 0) && (white.green != black.green))
equalize_map[i].green=(MagickRealType) ScaleMapToQuantum((MagickRealType)
((MaxMap*(map[i].green-black.green))/(white.green-black.green)));
if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
equalize_map[i].blue=(MagickRealType) ScaleMapToQuantum((MagickRealType)
((MaxMap*(map[i].blue-black.blue))/(white.blue-black.blue)));
if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
equalize_map[i].opacity=(MagickRealType) ScaleMapToQuantum(
(MagickRealType) ((MaxMap*(map[i].opacity-black.opacity))/
(white.opacity-black.opacity)));
if ((((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace)) &&
(white.index != black.index))
equalize_map[i].index=(MagickRealType) ScaleMapToQuantum((MagickRealType)
((MaxMap*(map[i].index-black.index))/(white.index-black.index)));
}
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
map=(MagickPixelPacket *) RelinquishMagickMemory(map);
if (image->storage_class == PseudoClass)
{
/*
Equalize colormap.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
if (((channel & RedChannel) != 0) && (white.red != black.red))
image->colormap[i].red=ClampToQuantum(equalize_map[
ScaleQuantumToMap(image->colormap[i].red)].red);
if (((channel & GreenChannel) != 0) && (white.green != black.green))
image->colormap[i].green=ClampToQuantum(equalize_map[
ScaleQuantumToMap(image->colormap[i].green)].green);
if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
image->colormap[i].blue=ClampToQuantum(equalize_map[
ScaleQuantumToMap(image->colormap[i].blue)].blue);
if (((channel & OpacityChannel) != 0) &&
(white.opacity != black.opacity))
image->colormap[i].opacity=ClampToQuantum(equalize_map[
ScaleQuantumToMap(image->colormap[i].opacity)].opacity);
}
}
/*
Equalize image.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (((channel & RedChannel) != 0) && (white.red != black.red))
SetPixelRed(q,ClampToQuantum(equalize_map[
ScaleQuantumToMap(GetPixelRed(q))].red));
if (((channel & GreenChannel) != 0) && (white.green != black.green))
SetPixelGreen(q,ClampToQuantum(equalize_map[
ScaleQuantumToMap(GetPixelGreen(q))].green));
if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
SetPixelBlue(q,ClampToQuantum(equalize_map[
ScaleQuantumToMap(GetPixelBlue(q))].blue));
if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
SetPixelOpacity(q,ClampToQuantum(equalize_map[
ScaleQuantumToMap(GetPixelOpacity(q))].opacity));
if ((((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace)) &&
(white.index != black.index))
SetPixelIndex(indexes+x,ClampToQuantum(equalize_map[
ScaleQuantumToMap(GetPixelIndex(indexes+x))].index));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EqualizeImageChannel)
#endif
proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
equalize_map=(MagickPixelPacket *) RelinquishMagickMemory(equalize_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const char *level)
% MagickBooleanType GammaImageChannel(Image *image,
% const ChannelType channel,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o level: the image gamma as a string (e.g. 1.6,1.2,1.0).
%
% o gamma: the image gamma.
%
*/
MagickExport MagickBooleanType GammaImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    gamma;

  MagickStatusType
    flags,
    status;

  /*
    Parse the gamma string (e.g. "1.6,1.2,1.0") into per-channel gammas.
    Missing green/blue values default to the red (first) value.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (level == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(level,&geometry_info);
  gamma.red=geometry_info.rho;
  gamma.green=((flags & SigmaValue) != 0) ? geometry_info.sigma : gamma.red;
  gamma.blue=((flags & XiValue) != 0) ? geometry_info.xi : gamma.red;
  if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0))
    return(MagickTrue);  /* identity correction: nothing to do */
  if ((gamma.red != gamma.green) || (gamma.green != gamma.blue))
    {
      /*
        Distinct per-channel gammas: correct each channel separately and
        accumulate the per-channel statuses.
      */
      status=GammaImageChannel(image,RedChannel,(double) gamma.red);
      status|=GammaImageChannel(image,GreenChannel,(double) gamma.green);
      status|=GammaImageChannel(image,BlueChannel,(double) gamma.blue);
    }
  else
    status=GammaImageChannel(image,(const ChannelType) (RedChannel |
      GreenChannel | BlueChannel),(double) gamma.red);
  return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType GammaImageChannel(Image *image,
  const ChannelType channel,const double gamma)
{
#define GammaCorrectImageTag "GammaCorrect/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;  /* lookup table: map index -> gamma-corrected quantum */

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.  Each pixel update then reduces to a
    single table lookup: gamma_map[ScaleQuantumToMap(value)].
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);  /* identity: nothing to do */
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /*
    gamma == 0.0 leaves the map all zeros, which reduces the selected
    channel(s) to black -- the documented way to suppress a channel.
  */
  if (gamma != 0.0)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
        MagickRealType) (MaxMap*pow((double) i/MaxMap,1.0/gamma))));
  if (image->storage_class == PseudoClass)
    {
      /*
        Gamma-correct colormap.
        NOTE(review): progress/status appear in the shared() clause before
        they are initialized; harmless (unused in this loop) but confirm
        against upstream.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=gamma_map[
            ScaleQuantumToMap(image->colormap[i].red)];
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=gamma_map[
            ScaleQuantumToMap(image->colormap[i].green)];
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=gamma_map[
            ScaleQuantumToMap(image->colormap[i].blue)];
        if ((channel & OpacityChannel) != 0)
          {
            /*
              Without a matte channel the opacity member is corrected
              directly; with one, convert to alpha, correct, and convert
              back so the transparency sense is preserved.
            */
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=gamma_map[
                ScaleQuantumToMap(image->colormap[i].opacity)];
            else
              image->colormap[i].opacity=(Quantum) QuantumRange-
                gamma_map[ScaleQuantumToMap((Quantum) (QuantumRange-
                image->colormap[i].opacity))];
          }
      }
    }
  /*
    Gamma-correct image, one row per task.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (channel == DefaultChannels)
        {
          /* common case: correct the color channels, leave opacity alone */
          SetPixelRed(q,gamma_map[ScaleQuantumToMap(
            GetPixelRed(q))]);
          SetPixelGreen(q,gamma_map[ScaleQuantumToMap(
            GetPixelGreen(q))]);
          SetPixelBlue(q,gamma_map[ScaleQuantumToMap(
            GetPixelBlue(q))]);
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,gamma_map[ScaleQuantumToMap(
              GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,gamma_map[ScaleQuantumToMap(
              GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,gamma_map[ScaleQuantumToMap(
              GetPixelBlue(q))]);
          if ((channel & OpacityChannel) != 0)
            {
              /* as in the colormap path: matte images correct alpha */
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,gamma_map[ScaleQuantumToMap(
                  GetPixelOpacity(q))]);
              else
                SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum)
                  GetPixelAlpha(q))]);
            }
        }
      q++;
    }
    /* separate pass for CMYK black (index) channel */
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GammaImageChannel)
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /* track the cumulative gamma applied to the image */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image)
% MagickBooleanType HaldClutImageChannel(Image *image,
% const ChannelType channel,Image *hald_image)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o channel: the channel.
%
*/
/*
  Return the smaller of two size_t values.
*/
static inline size_t MagickMin(const size_t x,const size_t y)
{
  return(x < y ? x : y);
}
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image)
{
  /*
    Convenience wrapper: apply the Hald CLUT to the default channels.
  */
  MagickBooleanType
    status;

  status=HaldClutImageChannel(image,DefaultChannels,hald_image);
  return(status);
}
MagickExport MagickBooleanType HaldClutImageChannel(Image *image,
  const ChannelType channel,const Image *hald_image)
{
#define HaldClutImageTag "Clut/Image"

  typedef struct _HaldInfo
  {
    MagickRealType
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  /*
    Apply a Hald color lookup table: a 3-D color cube of side 'level'
    flattened into a 2-D image.  Each pixel's (r,g,b) addresses a cell of
    the cube; trilinear interpolation between the surrounding CLUT entries
    yields the replacement color.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  length=MagickMin(hald_image->columns,hald_image->rows);
  /* recover the cube order: smallest level >= 2 with level^3 >= length */
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetMagickPixelPacket(hald_image,&zero);
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  hald_view=AcquireCacheView(hald_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      offset;

    HaldInfo
      point;

    MagickPixelPacket
      pixel,
      pixel1,
      pixel2,
      pixel3,
      pixel4;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      FIX: take the index queue from the destination image's view.  It was
      previously fetched from hald_view -- a view of the const CLUT image --
      yet the CMYK index writes below must target the image being modified.
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    pixel1=zero;
    pixel2=zero;
    pixel3=zero;
    pixel4=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Locate the pixel's cell in the color cube; the fractional parts of
        point drive the interpolation weights below.
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      /* blend along green (y) at the lower blue (z) plane ... */
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/
        width),&pixel2,exception);
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,point.y,&pixel3);
      /* ... then at the upper blue plane ... */
      offset+=cube_size;
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/
        width),&pixel2,exception);
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,point.y,&pixel4);
      /* ... and finally blend the two planes along blue (z) */
      MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4,
        pixel4.opacity,point.z,&pixel);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(pixel.index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_HaldClutImageChannel)
#endif
        proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() and LevelizeImageChannel(), below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const char *levels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o levels: Specify the levels where the black and white points have the
% range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
% A '!' flag inverts the re-mapping.
%
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels of the form "black[,white][,gamma]" with optional '%' and
    '!' flags (e.g. "10x90%+2").  Defaults: white = QuantumRange, gamma = 1.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) QuantumRange;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  gamma=1.0;
  if ((flags & XiValue) != 0)
    gamma=geometry_info.xi;
  if ((flags & PercentValue) != 0)
    {
      /*
        FIX: level points are documented to lie in [0, QuantumRange], so a
        '%' suffix scales them as a percentage of QuantumRange.  They were
        previously scaled by image->columns*image->rows/100.0 -- pixel-count
        semantics that belong to histogram-based stretches, not to levels.
      */
      black_point*=(double) QuantumRange/100.0;
      white_point*=(double) QuantumRange/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;
  /* a '!' flag (AspectValue) inverts the mapping: levelize, not level */
  if ((flags & AspectValue ) == 0)
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  else
    status=LevelizeImage(image,black_point,white_point,gamma);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the normal level operation to the image, spreading
% out the values between the black and white points over the entire range of
% values. Gamma correction is also applied after the values has been mapped.
%
% It is typically used to improve image contrast, or to provide a controlled
% linear threshold for the image. If the black and white points are set to
% the minimum and maximum values found in the image, the image can be
% normalized, or, by swapping black and white values, negated.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma)
% MagickBooleanType LevelizeImageChannel(Image *image,
% const ChannelType channel,const double black_point,
% const double white_point,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: The level which is to be mapped to zero (black)
%
% o white_point: The level which is to be mapped to QuantumRange (white)
%
% o gamma: adjust gamma by this factor before mapping values.
% use 1.0 for purely linear stretching of image color values
%
*/
/*
  Map [black_point, white_point] onto the full quantum range, applying a
  gamma correction after the linear stretch.  Values outside the window clip
  to 0 or QuantumRange respectively.
*/
static inline MagickRealType LevelPixel(const double black_point,
  const double white_point,const double gamma,const MagickRealType pixel)
{
  double
    scale;

  if (pixel < black_point)
    return(0.0);
  if (pixel > white_point)
    return((MagickRealType) QuantumRange);
  scale=1.0;
  if (white_point != black_point)
    scale=1.0/(white_point-black_point);
  return((MagickRealType) QuantumRange*pow(scale*((double) pixel-black_point),
    1.0/gamma));
}
MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag "Level/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Level colormap entries for palette images.
    NOTE(review): progress/status appear in the shared() clause before they
    are initialized; harmless (unused in this loop) but confirm upstream.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(double) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,image->colormap[i].opacity));
    }
  /*
    Level image, one row per task.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelBlue(q))));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        /*
          NOTE(review): this levels the raw opacity value but stores the
          result via SetPixelAlpha (which inverts the sense); confirm
          whether GetPixelAlpha was intended here.
        */
        SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelOpacity(q))));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_LevelImageChannel)
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImageChannel() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImageChannel() can be called by using a +level command line
% API option, or by using a '!' on a -level or LevelImage() geometry string.
%
% It can be used for example de-contrast a greyscale image to the exact
% levels specified. Or by using specific levels for each channel of an image
% you can convert a gray-scale image to any linear color gradient, according
% to those levels.
%
% The format of the LevelizeImageChannel method is:
%
% MagickBooleanType LevelizeImageChannel(Image *image,
% const ChannelType channel,const double black_point,
% const double white_point,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma)
{
  /*
    Convenience wrapper: levelize all default channels.
  */
  return(LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
    gamma));
}
MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag "Levelize/Image"
/*
  Inverse of LevelPixel(): gamma-correct the normalized value, then compress
  the full quantum range into [black_point, white_point].  Captures the
  enclosing black_point/white_point/gamma locals.
  NOTE(review): gamma == 0.0 would divide by zero here -- confirm callers
  always pass a non-zero gamma.
*/
#define LevelizeValue(x) (ClampToQuantum(((MagickRealType) \
pow((double)(QuantumScale*(x)),1.0/gamma))*(white_point-black_point)+ \
black_point))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Levelize colormap entries for palette images.
    NOTE(review): progress/status appear in the shared() clause before they
    are initialized; harmless (unused in this loop) but confirm upstream.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=LevelizeValue(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=LevelizeValue(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=LevelizeValue(image->colormap[i].opacity);
    }
  /*
    Level image, one row per task.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,LevelizeValue(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,LevelizeValue(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,LevelizeValue(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        SetPixelOpacity(q,LevelizeValue(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,LevelizeValue(
          GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_LevelizeImageChannel)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColor() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel-by-channel
% basis, as per LevelImage(). The given colors allow you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% The format of the LevelColorsImageChannel method is:
%
% MagickBooleanType LevelColorsImage(Image *image,
% const MagickPixelPacket *black_color,
% const MagickPixelPacket *white_color,const MagickBooleanType invert)
% MagickBooleanType LevelColorsImageChannel(Image *image,
% const ChannelType channel,const MagickPixelPacket *black_color,
% const MagickPixelPacket *white_color,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_color: The color to map black to/from
%
% o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
*/
MagickExport MagickBooleanType LevelColorsImage(Image *image,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
  /*
    Convenience wrapper: apply the color-level mapping to default channels.
  */
  return(LevelColorsImageChannel(image,DefaultChannels,black_color,
    white_color,invert));
}
MagickExport MagickBooleanType LevelColorsImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *black_color,
  const MagickPixelPacket *white_color,const MagickBooleanType invert)
{
  MagickBooleanType
    (*level_method)(Image *,const ChannelType,const double,const double,
      const double);

  MagickStatusType
    status;

  /*
    Map the given colors to/from black and white on a channel-by-channel
    basis.  When invert is false each channel is leveled (the given colors
    become black and white); when true it is levelized (black and white
    become the given colors).  Both operations share one signature, so the
    method is selected once and every channel is driven through it with a
    linear (gamma == 1.0) mapping.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  level_method=(invert == MagickFalse) ? LevelImageChannel :
    LevelizeImageChannel;
  status=MagickFalse;
  if ((channel & RedChannel) != 0)
    status|=level_method(image,RedChannel,black_color->red,white_color->red,
      1.0);
  if ((channel & GreenChannel) != 0)
    status|=level_method(image,GreenChannel,black_color->green,
      white_color->green,1.0);
  if ((channel & BlueChannel) != 0)
    status|=level_method(image,BlueChannel,black_color->blue,
      white_color->blue,1.0);
  if (((channel & OpacityChannel) != 0) &&
      (image->matte == MagickTrue))
    status|=level_method(image,OpacityChannel,black_color->opacity,
      white_color->opacity,1.0);
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    status|=level_method(image,IndexChannel,black_color->index,
      white_color->index,1.0);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point)
{
#define LinearStretchImageTag "LinearStretch/Image"

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    *histogram,
    sum;

  ssize_t
    black,
    white,
    y;

  /*
    Build an intensity histogram of the image; black_point/white_point are
    pixel counts to discard from the dark and bright ends respectively.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  if (histogram == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  exception=(&image->exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[ScaleQuantumToMap(PixelIntensityToQuantum(p))]++;
      p++;
    }
  }
  /*
    Locate the black and white boundaries: accumulate counts from each end
    of the histogram until the requested number of pixels is consumed.
  */
  sum=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    sum+=histogram[black];
    if (sum >= black_point)
      break;
  }
  sum=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    sum+=histogram[white];
    if (sum >= white_point)
      break;
  }
  histogram=(MagickRealType *) RelinquishMagickMemory(histogram);
  /*
    Level the surviving pixel range to the full quantum range.
  */
  status=LevelImageChannel(image,DefaultChannels,(double) black,(double) white,
    1.0);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. And if the colorspace is
% HWB, use blackness, whiteness, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and
% hue.
%
*/
static void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    modulated_brightness,
    modulated_hue,
    modulated_saturation;

  /*
    Modulate the pixel in HSB space: scale brightness and saturation by
    their percentages (100 == unchanged) and rotate hue by
    0.5*(percent_hue/100-1), i.e. 100 is no shift and 200/0 are a half
    rotation in either direction, wrapped back into [0,1].
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  ConvertRGBToHSB(*red,*green,*blue,&modulated_hue,&modulated_saturation,
    &modulated_brightness);
  modulated_brightness*=0.01*percent_brightness;
  modulated_saturation*=0.01*percent_saturation;
  modulated_hue+=0.5*(0.01*percent_hue-1.0);
  while (modulated_hue < 0.0)
    modulated_hue+=1.0;
  while (modulated_hue > 1.0)
    modulated_hue-=1.0;
  ConvertHSBToRGB(modulated_hue,modulated_saturation,modulated_brightness,
    red,green,blue);
}
static void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    modulated_hue,
    modulated_lightness,
    modulated_saturation;

  /*
    Modulate the pixel in HSL space: scale lightness and saturation by
    their percentages (100 == unchanged) and rotate hue by
    0.5*(percent_hue/100-1), wrapped back into the [0,1] range.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  ConvertRGBToHSL(*red,*green,*blue,&modulated_hue,&modulated_saturation,
    &modulated_lightness);
  modulated_lightness*=0.01*percent_lightness;
  modulated_saturation*=0.01*percent_saturation;
  modulated_hue+=0.5*(0.01*percent_hue-1.0);
  while (modulated_hue < 0.0)
    modulated_hue+=1.0;
  while (modulated_hue > 1.0)
    modulated_hue-=1.0;
  ConvertHSLToRGB(modulated_hue,modulated_saturation,modulated_lightness,
    red,green,blue);
}
static void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    modulated_blackness,
    modulated_hue,
    modulated_whiteness;

  /*
    Modulate the pixel in HWB space: scale whiteness and blackness by
    their percentages (100 == unchanged) and rotate hue by
    0.5*(percent_hue/100-1), wrapped back into the [0,1] range.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  ConvertRGBToHWB(*red,*green,*blue,&modulated_hue,&modulated_whiteness,
    &modulated_blackness);
  modulated_whiteness*=0.01*percent_whiteness;
  modulated_blackness*=0.01*percent_blackness;
  modulated_hue+=0.5*(0.01*percent_hue-1.0);
  while (modulated_hue < 0.0)
    modulated_hue+=1.0;
  while (modulated_hue > 1.0)
    modulated_hue-=1.0;
  ConvertHWBToRGB(modulated_hue,modulated_whiteness,modulated_blackness,
    red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate)
{
#define ModulateImageTag "Modulate/Image"
CacheView
*image_view;
ColorspaceType
colorspace;
const char
*artifact;
double
percent_brightness,
percent_hue,
percent_saturation;
ExceptionInfo
*exception;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickStatusType
flags;
register ssize_t
i;
ssize_t
y;
/*
Initialize modulate table.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (modulate == (char *) NULL)
return(MagickFalse);
/*
The modulate string is "brightness[,saturation[,hue]]" in percent; a
missing saturation or hue defaults to 100 (no change).
*/
flags=ParseGeometry(modulate,&geometry_info);
percent_brightness=geometry_info.rho;
percent_saturation=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
percent_saturation=100.0;
percent_hue=geometry_info.xi;
if ((flags & XiValue) == 0)
percent_hue=100.0;
/*
The "modulate:colorspace" artifact selects the working colorspace
(HSB, HSL, or HWB); HSL is the default case in the switches below.
*/
colorspace=UndefinedColorspace;
artifact=GetImageArtifact(image,"modulate:colorspace");
if (artifact != (const char *) NULL)
colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
MagickFalse,artifact);
if (image->storage_class == PseudoClass)
{
/*
Modulate colormap.  NOTE(review): execution also falls through to the
pixel loop below, so PseudoClass images have both their colormap and
their raster pixels processed -- confirm this is intended.
For HWB, the saturation percent acts on whiteness and the brightness
percent on blackness (see ModulateHWB's parameter order).
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
switch (colorspace)
{
case HSBColorspace:
{
ModulateHSB(percent_hue,percent_saturation,percent_brightness,
&image->colormap[i].red,&image->colormap[i].green,
&image->colormap[i].blue);
break;
}
case HSLColorspace:
default:
{
ModulateHSL(percent_hue,percent_saturation,percent_brightness,
&image->colormap[i].red,&image->colormap[i].green,
&image->colormap[i].blue);
break;
}
case HWBColorspace:
{
ModulateHWB(percent_hue,percent_saturation,percent_brightness,
&image->colormap[i].red,&image->colormap[i].green,
&image->colormap[i].blue);
break;
}
}
}
/*
Modulate image.  Rows are processed in parallel; any cache failure sets
status to MagickFalse and remaining rows become no-ops.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
blue,
green,
red;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=GetPixelRed(q);
green=GetPixelGreen(q);
blue=GetPixelBlue(q);
switch (colorspace)
{
case HSBColorspace:
{
ModulateHSB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSLColorspace:
default:
{
ModulateHSL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HWBColorspace:
{
ModulateHWB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
}
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/*
Progress reporting is serialized; progress++ would race otherwise.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ModulateImage)
#endif
proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImageChannel method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale)
% MagickBooleanType NegateImageChannel(Image *image,
% const ChannelType channel,const MagickBooleanType grayscale)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale)
{
  /*
    Convenience wrapper: negate the default channels of the image (see
    NegateImageChannel for the grayscale semantics).
  */
  return(NegateImageChannel(image,DefaultChannels,grayscale));
}
MagickExport MagickBooleanType NegateImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType grayscale)
{
#define NegateImageTag "Negate/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Negate the selected channels of every pixel: each sample q becomes
    QuantumRange-q.  When grayscale is MagickTrue only pixels whose red,
    green, and blue samples are all equal are negated.  Returns MagickFalse
    if any pixel region could not be read, written, or synchronized.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Negate colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if (grayscale != MagickFalse)
          if ((image->colormap[i].red != image->colormap[i].green) ||
              (image->colormap[i].green != image->colormap[i].blue))
            continue;
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=(Quantum) QuantumRange-
            image->colormap[i].red;
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=(Quantum) QuantumRange-
            image->colormap[i].green;
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=(Quantum) QuantumRange-
            image->colormap[i].blue;
      }
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  if (grayscale != MagickFalse)
    {
      /*
        Grayscale-only pass: skip any pixel that is not gray.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register IndexPacket
          *restrict indexes;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((GetPixelRed(q) != GetPixelGreen(q)) ||
              (GetPixelGreen(q) != GetPixelBlue(q)))
            {
              q++;
              continue;
            }
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange-GetPixelRed(q));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange-GetPixelGreen(q));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange-GetPixelBlue(q));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x));
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_NegateImageChannel)
#endif
            proceed=SetImageProgress(image,NegateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /*
        Bug fix: this path previously returned MagickTrue unconditionally,
        hiding pixel-cache read/sync failures from the caller.
      */
      return(status);
    }
  /*
    Negate image.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,QuantumRange-GetPixelRed(q));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,QuantumRange-GetPixelGreen(q));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,QuantumRange-GetPixelBlue(q));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,QuantumRange-
          GetPixelIndex(indexes+x));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_NegateImageChannel)
#endif
        proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% stretching its intensity range: the black point is placed at 0.15% of the
% total pixel count and the white point at 99.95% of it.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image)
% MagickBooleanType NormalizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image)
{
  /*
    Convenience wrapper: normalize the default channels of the image.
  */
  return(NormalizeImageChannel(image,DefaultChannels));
}
MagickExport MagickBooleanType NormalizeImageChannel(Image *image,
  const ChannelType channel)
{
  double
    pixel_count;

  /*
    Contrast-stretch with fixed cumulative-count thresholds: the black
    point is placed at 0.15% of the total pixel count and the white point
    at 99.95% of it.
  */
  pixel_count=(double) image->columns*image->rows;
  return(ContrastStretchImageChannel(image,channel,0.0015*pixel_count,
    0.9995*pixel_count));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels)
% MagickBooleanType SigmoidalContrastImageChannel(Image *image,
% const ChannelType channel,const MagickBooleanType sharpen,
% const double contrast,const double midpoint)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o sharpen: Increase or decrease image contrast.
%
% o alpha: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o beta: midpoint of the function as a color value 0 to QuantumRange.
%
*/
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const char *levels)
{
  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse the levels geometry string: rho is the contrast strength and
    sigma the midpoint (default QuantumRange/2; a trailing '%' scales the
    midpoint from percent of QuantumRange), then delegate to
    SigmoidalContrastImageChannel on the default channels.

    Guard against a NULL levels string (consistent with ModulateImage's
    NULL-modulate check); ParseGeometry would otherwise dereference it.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  if ((flags & SigmaValue) == 0)
    geometry_info.sigma=1.0*QuantumRange/2.0;
  if ((flags & PercentValue) != 0)
    geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0;
  status=SigmoidalContrastImageChannel(image,DefaultChannels,sharpen,
    geometry_info.rho,geometry_info.sigma);
  return(status);
}
MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image,
const ChannelType channel,const MagickBooleanType sharpen,
const double contrast,const double midpoint)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickRealType
*sigmoidal_map;
register ssize_t
i;
ssize_t
y;
/*
Allocate and initialize sigmoidal maps.  sigmoidal_map is a lookup table
with MaxMap+1 entries mapping each input map level to its contrast-
adjusted quantum value; pixels are then transformed by a single table
lookup per sample.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*sigmoidal_map));
if (sigmoidal_map == (MagickRealType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) ResetMagickMemory(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map));
/*
Build the table.  NOTE(review): this pragma shares progress/status which
are neither initialized nor used in this loop -- harmless, but confirm
it is intentional.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (sharpen != MagickFalse)
{
/*
Sharpen: a logistic sigmoid centered on midpoint, rescaled so the
range endpoints map exactly to 0 and MaxMap (+0.5 rounds).
*/
sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
(MaxMap*((1.0/(1.0+exp(contrast*(midpoint/(double) QuantumRange-
(double) i/MaxMap))))-(1.0/(1.0+exp(contrast*(midpoint/
(double) QuantumRange)))))/((1.0/(1.0+exp(contrast*(midpoint/
(double) QuantumRange-1.0))))-(1.0/(1.0+exp(contrast*(midpoint/
(double) QuantumRange)))))+0.5));
continue;
}
/*
Soften: the inverse of the sharpening transfer function above, so
the contrast is reduced instead of increased.
*/
sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
(MaxMap*(QuantumScale*midpoint-log((1.0-(1.0/(1.0+exp(midpoint/
(double) QuantumRange*contrast))+((double) i/MaxMap)*((1.0/
(1.0+exp(contrast*(midpoint/(double) QuantumRange-1.0))))-(1.0/
(1.0+exp(midpoint/(double) QuantumRange*contrast))))))/
(1.0/(1.0+exp(midpoint/(double) QuantumRange*contrast))+
((double) i/MaxMap)*((1.0/(1.0+exp(contrast*(midpoint/
(double) QuantumRange-1.0))))-(1.0/(1.0+exp(midpoint/
(double) QuantumRange*contrast))))))/contrast)));
}
if (image->storage_class == PseudoClass)
{
/*
Sigmoidal-contrast enhance colormap.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & RedChannel) != 0)
image->colormap[i].red=ClampToQuantum(sigmoidal_map[
ScaleQuantumToMap(image->colormap[i].red)]);
if ((channel & GreenChannel) != 0)
image->colormap[i].green=ClampToQuantum(sigmoidal_map[
ScaleQuantumToMap(image->colormap[i].green)]);
if ((channel & BlueChannel) != 0)
image->colormap[i].blue=ClampToQuantum(sigmoidal_map[
ScaleQuantumToMap(image->colormap[i].blue)]);
if ((channel & OpacityChannel) != 0)
image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[
ScaleQuantumToMap(image->colormap[i].opacity)]);
}
}
/*
Sigmoidal-contrast enhance image.  Rows are processed in parallel; any
cache failure sets status and remaining rows become no-ops.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
GetPixelRed(q))]));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
GetPixelGreen(q))]));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
GetPixelBlue(q))]));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[
ScaleQuantumToMap(GetPixelOpacity(q))]));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[
ScaleQuantumToMap(GetPixelIndex(indexes+x))]));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/*
Progress reporting is serialized; progress++ would race otherwise.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SigmoidalContrastImageChannel)
#endif
proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map);
return(status);
}
|
kmp_atomic_float10_max_min.c | // RUN: %libomp-compile -mlong-double-80 && %libomp-run
// UNSUPPORTED: gcc
#include <stdio.h>
#include <omp.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef void* ident_t;
extern void __kmpc_atomic_float10_max(ident_t *id_ref, int gtid,
long double *lhs, long double rhs);
extern void __kmpc_atomic_float10_min(ident_t *id_ref, int gtid,
long double *lhs, long double rhs);
extern long double __kmpc_atomic_float10_max_cpt(ident_t *id_ref, int gtid,
long double *lhs,
long double rhs, int flag);
extern long double __kmpc_atomic_float10_min_cpt(ident_t *id_ref, int gtid,
long double *lhs,
long double rhs, int flag);
#ifdef __cplusplus
}
#endif
/* Exercises the float10 max/min atomic entry points and their capture
   variants.  Fixes copy-pasted error messages: the min/min_cpt checks
   previously printed "max"/"max_cpt", and two comments referenced the
   wrong operand. */
int main() {
  int ret = 0;
  long double s = 012.3456; // small
  long double e = 123.4567; // middle
  long double d = 234.5678; // big
  long double x = 123.4567; // object
  long double v = 0.; // captured value
  // initialize OpenMP runtime library
  omp_set_num_threads(4);
  // max
  // #pragma omp atomic compare update
  // if (x < d) x = d;
  __kmpc_atomic_float10_max(NULL, 0, &x, d);
  if (x != d) {
    ret++;
    printf("Error max: %Lf != %Lf\n", x, d);
  }
  __kmpc_atomic_float10_max(NULL, 0, &x, s); // no-op
  if (x != d) {
    ret++;
    printf("Error max: %Lf != %Lf\n", x, d);
  }
  // min
  // #pragma omp atomic compare update
  // if (x > s) x = s;
  __kmpc_atomic_float10_min(NULL, 0, &x, s);
  if (x != s) {
    ret++;
    printf("Error min: %Lf != %Lf\n", x, s);
  }
  __kmpc_atomic_float10_min(NULL, 0, &x, e); // no-op
  if (x != s) {
    ret++;
    printf("Error min: %Lf != %Lf\n", x, s);
  }
  // max_cpt old
  // #pragma omp atomic compare update capture
  // { v = x; if (x < d) x = d; }
  v = __kmpc_atomic_float10_max_cpt(NULL, 0, &x, d, 0);
  if (x != d) {
    ret++;
    printf("Error max_cpt obj: %Lf != %Lf\n", x, d);
  }
  if (v != s) {
    ret++;
    printf("Error max_cpt cpt: %Lf != %Lf\n", v, s);
  }
  v = __kmpc_atomic_float10_max_cpt(NULL, 0, &x, e, 0); // no-op
  if (x != d) {
    ret++;
    printf("Error max_cpt obj: %Lf != %Lf\n", x, d);
  }
  if (v != d) {
    ret++;
    printf("Error max_cpt cpt: %Lf != %Lf\n", v, d);
  }
  // min_cpt old
  // #pragma omp atomic compare update capture
  // { v = x; if (x > s) x = s; }
  v = __kmpc_atomic_float10_min_cpt(NULL, 0, &x, s, 0);
  if (x != s) {
    ret++;
    printf("Error min_cpt obj: %Lf != %Lf\n", x, s);
  }
  if (v != d) {
    ret++;
    printf("Error min_cpt cpt: %Lf != %Lf\n", v, d);
  }
  v = __kmpc_atomic_float10_min_cpt(NULL, 0, &x, e, 0); // no-op
  if (x != s) {
    ret++;
    printf("Error min_cpt obj: %Lf != %Lf\n", x, s);
  }
  if (v != s) {
    ret++;
    printf("Error min_cpt cpt: %Lf != %Lf\n", v, s);
  }
  // max_cpt new
  // #pragma omp atomic compare update capture
  // { if (x < d) x = d; v = x; }
  v = __kmpc_atomic_float10_max_cpt(NULL, 0, &x, d, 1);
  if (x != d) {
    ret++;
    printf("Error max_cpt obj: %Lf != %Lf\n", x, d);
  }
  if (v != d) {
    ret++;
    printf("Error max_cpt cpt: %Lf != %Lf\n", v, d);
  }
  v = __kmpc_atomic_float10_max_cpt(NULL, 0, &x, e, 1); // no-op
  if (x != d) {
    ret++;
    printf("Error max_cpt obj: %Lf != %Lf\n", x, d);
  }
  if (v != d) {
    ret++;
    printf("Error max_cpt cpt: %Lf != %Lf\n", v, d);
  }
  // min_cpt new
  // #pragma omp atomic compare update capture
  // { if (x > s) x = s; v = x; }
  v = __kmpc_atomic_float10_min_cpt(NULL, 0, &x, s, 1);
  if (x != s) {
    ret++;
    printf("Error min_cpt obj: %Lf != %Lf\n", x, s);
  }
  if (v != s) {
    ret++;
    printf("Error min_cpt cpt: %Lf != %Lf\n", v, s);
  }
  v = __kmpc_atomic_float10_min_cpt(NULL, 0, &x, e, 1); // no-op
  if (x != s) {
    ret++;
    printf("Error min_cpt obj: %Lf != %Lf\n", x, s);
  }
  if (v != s) {
    ret++;
    printf("Error min_cpt cpt: %Lf != %Lf\n", v, s);
  }
  if (ret == 0)
    printf("passed\n");
  return ret;
}
|
ex1.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define CHUNKSIZE 10
#define N 10000000 // 10 mil
float a[N], b[N], c[N];
int main (int argc, char *argv[])
{
  int thread_count, thread_id, idx, chunk_size;

  /* Fill the operand vectors: a[i] = b[i] = i. */
  for (idx = 0; idx < N; idx++)
    a[idx] = b[idx] = idx * 1.0;
  chunk_size = CHUNKSIZE;

  double start_time, end_time;
  start_time = omp_get_wtime();
#pragma omp parallel shared(a, b, c, thread_count, chunk_size) private(idx, thread_id)
  {
    thread_id = omp_get_thread_num();
    if (thread_id == 0) {
      thread_count = omp_get_num_threads();
      printf("Number of threads = %d\n", thread_count);
    }

    /* Element-wise vector addition, statically chunked over the team. */
#pragma omp for schedule(static, chunk_size)
    for (idx = 0; idx < N; idx++)
      c[idx] = a[idx] + b[idx];
  }
  end_time = omp_get_wtime();

  printf("Execution time: %g\n", end_time - start_time);
  return 0;
}
|
work.c | #include<stdio.h>
#include<omp.h>
#define N 1000
void add(int *a, int *b, int *c, int min, int max);
void mul(int *a, int *b, int *d, int min, int max);
void add_add(int *a, int *b, int *e, int min, int max);
void mul_mul(int *a, int *b, int *f, int min, int max);
int main(){
  int a[N];
  int b[N];
  int c[N];
  int d[N];
  int e[N];
  int f[N];
  int i;

  /* Initialize inputs in parallel: a = [0..N), b = all ones. */
#pragma omp parallel for
  for(i=0; i < N; i++){
    a[i] = i;
    b[i] = 1;
  }

  /*
    Partition [0, N) across the ACTUAL team size.  The original code
    hard-coded a team of 4: with more threads it indexed past the arrays
    (undefined behavior), and with fewer it left part of c/d/e/f
    uninitialized before printing them.  The last thread absorbs the
    remainder when N is not divisible by the team size.
  */
#pragma omp parallel
  {
    int thread_id = omp_get_thread_num();
    int num_threads = omp_get_num_threads();
    int chunk = N/num_threads;
    int begin = chunk*thread_id;
    int end = (thread_id == num_threads-1) ? N : begin+chunk;
    add(a,b,c,begin,end);
    mul(a,b,d,begin,end);
    add_add(a,b,e,begin,end);
    mul_mul(a,b,f,begin,end);
  }

  for(i=0; i < N; i++){
    printf("c : %d\n",c[i]);
    printf("d : %d\n",d[i]);
    printf("e : %d\n",e[i]);
    printf("f : %d\n",f[i]);
  }
  return 0;
}
|
displacement_op_cpu.h | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef DISPLACEMENT_OP_CPU_H_
#define DISPLACEMENT_OP_CPU_H_
#include <array>
#include <cmath>
#include <vector>
#include "bound_space_op.h"
#include "math_util.h"
#include "param.h"
#include "simulation.h"
namespace bdm {
template <typename TSimulation = Simulation<>>
class DisplacementOpCpu {
 public:
  DisplacementOpCpu() {}
  ~DisplacementOpCpu() {}

  /// Computes the mechanical displacement of every simulation object from
  /// its neighbors (within the grid's largest-object search radius) and
  /// applies all displacements afterwards, so no object observes a
  /// neighbor that has already moved in this step.
  template <typename TContainer>
  void operator()(TContainer* sim_objects, uint16_t type_idx) const {
    std::vector<std::array<double, 3>> sim_object_movements;
    // Bug fix: the original used reserve() and then wrote through
    // operator[], which is undefined behavior (reserve does not change
    // size()).  resize() value-initializes the slots we index below.
    sim_object_movements.resize(sim_objects->size());
    auto* sim = TSimulation::GetActive();
    auto* grid = sim->GetGrid();
    auto search_radius = grid->GetLargestObjectSize();
    double squared_radius = search_radius * search_radius;
#pragma omp parallel for shared(grid) firstprivate(squared_radius)
    for (size_t i = 0; i < sim_objects->size(); i++) {
      sim_object_movements[i] =
          (*sim_objects)[i].CalculateDisplacement(grid, squared_radius);
    }
    // Set new positions after all updates have been calculated
    // otherwise some sim_objects would see neighbors with already updated
    // positions
    // which would lead to inconsistencies
    // FIXME there are still inconsistencies if there are more than one
    // simulation
    // object types!
    auto* param = sim->GetParam();
#pragma omp parallel for
    for (size_t i = 0; i < sim_objects->size(); i++) {
      auto&& sim_object = (*sim_objects)[i];
      sim_object.ApplyDisplacement(sim_object_movements[i]);
      if (param->bound_space_) {
        ApplyBoundingBox(&sim_object, param->min_bound_, param->max_bound_);
        // NOTE(review): called once per object from inside a parallel
        // loop with identical arguments -- looks redundant and possibly
        // racy; confirm whether it can be hoisted out of the loop.
        grid->SetDimensionThresholds(param->min_bound_, param->max_bound_);
      }
    }
  }
};
} // namespace bdm
#endif // DISPLACEMENT_OP_CPU_H_
|
GB_unaryop__ainv_int8_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int8_bool
// op(A') function: GB_tran__ainv_int8_bool
// C type: int8_t
// A type: bool
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -(int8_t) Ax [p] for every entry (AINV: additive inverse), done
// in parallel with a static schedule.  Auto-generated -- keep in sync with
// the generator rather than hand-editing the logic.
GrB_Info GB_unop__ainv_int8_bool
(
int8_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out; caller falls back to
// the generic kernel
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -(int8_t) A' : transpose A, casting bool to int8_t and applying the
// additive-inverse operator; the loop body lives in the shared template
// GB_unaryop_transpose.c (phase 2 of 2).  Auto-generated -- do not
// hand-edit the logic.
GrB_Info GB_tran__ainv_int8_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was compiled out; caller falls back to
// the generic kernel
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Example_nowait.2.c | /*
* @@name: nowait.2c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
*/
#include <math.h>
void nowait_example2(int n, float *a, float *b, float *c, float *y, float
*z)
{
int i;
#pragma omp parallel
{
#pragma omp for schedule(static) nowait
for (i=0; i<n; i++)
c[i] = (a[i] + b[i]) / 2.0f;
#pragma omp for schedule(static) nowait
for (i=0; i<n; i++)
z[i] = sqrtf(c[i]);
#pragma omp for schedule(static) nowait
for (i=1; i<=n; i++)
y[i] = z[i-1] + a[i];
}
}
|
diagsv_x_coo_u.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, ALPHA_Number *y)
{
    int thread_count = alpha_get_thread_num();

    /*
      Triangular solve with a unit diagonal: y[r] = alpha * x[r] for every
      row.  The COO entries of A are never read -- presumably because the
      unit-diagonal variant ignores stored values entirely; confirm against
      the non-unit kernel.
    */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_count)
#endif
    for (ALPHA_INT row = 0; row < A->rows; row++)
    {
        alpha_mul(y[row], alpha, x[row]);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
kmp_atomic.c | /*
* kmp_atomic.c -- ATOMIC implementation routines
* $Revision: 43421 $
* $Date: 2014-08-28 08:56:10 -0500 (Thu, 28 Aug 2014) $
*/
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
#include "kmp_atomic.h"
#include "kmp.h" // TRUE, asm routines prototypes
typedef unsigned char uchar;
typedef unsigned short ushort;
/*!
@defgroup ATOMIC_OPS Atomic Operations
These functions are used for implementing the many different varieties of atomic operations.
The compiler is at liberty to inline atomic operations that are naturally supported
by the target architecture. For instance on IA-32 architecture an atomic like this can be inlined
@code
static int s = 0;
#pragma omp atomic
s++;
@endcode
using the single instruction: `lock; incl s`
However the runtime does provide entrypoints for these operations to support compilers that choose
not to inline them. (For instance, `__kmpc_atomic_fixed4_add` could be used to perform the
increment above.)
The names of the functions are encoded by using the data type name and the operation name, as in these tables.
Data Type | Data type encoding
-----------|---------------
int8_t | `fixed1`
uint8_t | `fixed1u`
int16_t | `fixed2`
uint16_t | `fixed2u`
int32_t | `fixed4`
uint32_t | `fixed4u`
int64_t | `fixed8`
uint64_t | `fixed8u`
float | `float4`
double | `float8`
float 10 (8087 eighty bit float) | `float10`
complex<float> | `cmplx4`
complex<double> | `cmplx8`
complex<float10> | `cmplx10`
<br>
Operation | Operation encoding
----------|-------------------
+ | add
- | sub
\* | mul
/ | div
& | andb
<< | shl
\>\> | shr
\| | orb
^ | xor
&& | andl
\|\| | orl
maximum | max
minimum | min
.eqv. | eqv
.neqv. | neqv
<br>
For non-commutative operations, `_rev` can also be added for the reversed operation.
For the functions that capture the result, the suffix `_cpt` is added.
Update Functions
================
The general form of an atomic function that just performs an update (without a `capture`)
@code
void __kmpc_atomic_<datatype>_<operation>( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs );
@endcode
@param ident_t a pointer to source location
@param gtid the global thread id
@param lhs a pointer to the left operand
@param rhs the right operand
`capture` functions
===================
The capture functions perform an atomic update and return a result, which is either the value
before the capture, or that after. They take an additional argument to determine which result is returned.
Their general form is therefore
@code
TYPE __kmpc_atomic_<datatype>_<operation>_cpt( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag );
@endcode
@param ident_t a pointer to source location
@param gtid the global thread id
@param lhs a pointer to the left operand
@param rhs the right operand
@param flag one if the result is to be captured *after* the operation, zero if captured *before*.
The one set of exceptions to this is the `complex<float>` type where the value is not returned,
rather an extra argument pointer is passed.
They look like
@code
void __kmpc_atomic_cmplx4_<op>_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag );
@endcode
Read and Write Operations
=========================
The OpenMP<sup>*</sup> standard now supports atomic operations that simply ensure that the
value is read or written atomically, with no modification
performed. In many cases on IA-32 architecture these operations can be inlined since
the architecture guarantees that no tearing occurs on aligned objects
accessed with a single memory operation of up to 64 bits in size.
The general form of the read operations is
@code
TYPE __kmpc_atomic_<type>_rd ( ident_t *id_ref, int gtid, TYPE * loc );
@endcode
For the write operations the form is
@code
void __kmpc_atomic_<type>_wr ( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs );
@endcode
Full list of functions
======================
This leads to the generation of 376 atomic functions, as follows.
Functions for integers
---------------------
There are versions here for integers of size 1,2,4 and 8 bytes both signed and unsigned (where that matters).
@code
__kmpc_atomic_fixed1_add
__kmpc_atomic_fixed1_add_cpt
__kmpc_atomic_fixed1_add_fp
__kmpc_atomic_fixed1_andb
__kmpc_atomic_fixed1_andb_cpt
__kmpc_atomic_fixed1_andl
__kmpc_atomic_fixed1_andl_cpt
__kmpc_atomic_fixed1_div
__kmpc_atomic_fixed1_div_cpt
__kmpc_atomic_fixed1_div_cpt_rev
__kmpc_atomic_fixed1_div_float8
__kmpc_atomic_fixed1_div_fp
__kmpc_atomic_fixed1_div_rev
__kmpc_atomic_fixed1_eqv
__kmpc_atomic_fixed1_eqv_cpt
__kmpc_atomic_fixed1_max
__kmpc_atomic_fixed1_max_cpt
__kmpc_atomic_fixed1_min
__kmpc_atomic_fixed1_min_cpt
__kmpc_atomic_fixed1_mul
__kmpc_atomic_fixed1_mul_cpt
__kmpc_atomic_fixed1_mul_float8
__kmpc_atomic_fixed1_mul_fp
__kmpc_atomic_fixed1_neqv
__kmpc_atomic_fixed1_neqv_cpt
__kmpc_atomic_fixed1_orb
__kmpc_atomic_fixed1_orb_cpt
__kmpc_atomic_fixed1_orl
__kmpc_atomic_fixed1_orl_cpt
__kmpc_atomic_fixed1_rd
__kmpc_atomic_fixed1_shl
__kmpc_atomic_fixed1_shl_cpt
__kmpc_atomic_fixed1_shl_cpt_rev
__kmpc_atomic_fixed1_shl_rev
__kmpc_atomic_fixed1_shr
__kmpc_atomic_fixed1_shr_cpt
__kmpc_atomic_fixed1_shr_cpt_rev
__kmpc_atomic_fixed1_shr_rev
__kmpc_atomic_fixed1_sub
__kmpc_atomic_fixed1_sub_cpt
__kmpc_atomic_fixed1_sub_cpt_rev
__kmpc_atomic_fixed1_sub_fp
__kmpc_atomic_fixed1_sub_rev
__kmpc_atomic_fixed1_swp
__kmpc_atomic_fixed1_wr
__kmpc_atomic_fixed1_xor
__kmpc_atomic_fixed1_xor_cpt
__kmpc_atomic_fixed1u_div
__kmpc_atomic_fixed1u_div_cpt
__kmpc_atomic_fixed1u_div_cpt_rev
__kmpc_atomic_fixed1u_div_fp
__kmpc_atomic_fixed1u_div_rev
__kmpc_atomic_fixed1u_shr
__kmpc_atomic_fixed1u_shr_cpt
__kmpc_atomic_fixed1u_shr_cpt_rev
__kmpc_atomic_fixed1u_shr_rev
__kmpc_atomic_fixed2_add
__kmpc_atomic_fixed2_add_cpt
__kmpc_atomic_fixed2_add_fp
__kmpc_atomic_fixed2_andb
__kmpc_atomic_fixed2_andb_cpt
__kmpc_atomic_fixed2_andl
__kmpc_atomic_fixed2_andl_cpt
__kmpc_atomic_fixed2_div
__kmpc_atomic_fixed2_div_cpt
__kmpc_atomic_fixed2_div_cpt_rev
__kmpc_atomic_fixed2_div_float8
__kmpc_atomic_fixed2_div_fp
__kmpc_atomic_fixed2_div_rev
__kmpc_atomic_fixed2_eqv
__kmpc_atomic_fixed2_eqv_cpt
__kmpc_atomic_fixed2_max
__kmpc_atomic_fixed2_max_cpt
__kmpc_atomic_fixed2_min
__kmpc_atomic_fixed2_min_cpt
__kmpc_atomic_fixed2_mul
__kmpc_atomic_fixed2_mul_cpt
__kmpc_atomic_fixed2_mul_float8
__kmpc_atomic_fixed2_mul_fp
__kmpc_atomic_fixed2_neqv
__kmpc_atomic_fixed2_neqv_cpt
__kmpc_atomic_fixed2_orb
__kmpc_atomic_fixed2_orb_cpt
__kmpc_atomic_fixed2_orl
__kmpc_atomic_fixed2_orl_cpt
__kmpc_atomic_fixed2_rd
__kmpc_atomic_fixed2_shl
__kmpc_atomic_fixed2_shl_cpt
__kmpc_atomic_fixed2_shl_cpt_rev
__kmpc_atomic_fixed2_shl_rev
__kmpc_atomic_fixed2_shr
__kmpc_atomic_fixed2_shr_cpt
__kmpc_atomic_fixed2_shr_cpt_rev
__kmpc_atomic_fixed2_shr_rev
__kmpc_atomic_fixed2_sub
__kmpc_atomic_fixed2_sub_cpt
__kmpc_atomic_fixed2_sub_cpt_rev
__kmpc_atomic_fixed2_sub_fp
__kmpc_atomic_fixed2_sub_rev
__kmpc_atomic_fixed2_swp
__kmpc_atomic_fixed2_wr
__kmpc_atomic_fixed2_xor
__kmpc_atomic_fixed2_xor_cpt
__kmpc_atomic_fixed2u_div
__kmpc_atomic_fixed2u_div_cpt
__kmpc_atomic_fixed2u_div_cpt_rev
__kmpc_atomic_fixed2u_div_fp
__kmpc_atomic_fixed2u_div_rev
__kmpc_atomic_fixed2u_shr
__kmpc_atomic_fixed2u_shr_cpt
__kmpc_atomic_fixed2u_shr_cpt_rev
__kmpc_atomic_fixed2u_shr_rev
__kmpc_atomic_fixed4_add
__kmpc_atomic_fixed4_add_cpt
__kmpc_atomic_fixed4_add_fp
__kmpc_atomic_fixed4_andb
__kmpc_atomic_fixed4_andb_cpt
__kmpc_atomic_fixed4_andl
__kmpc_atomic_fixed4_andl_cpt
__kmpc_atomic_fixed4_div
__kmpc_atomic_fixed4_div_cpt
__kmpc_atomic_fixed4_div_cpt_rev
__kmpc_atomic_fixed4_div_float8
__kmpc_atomic_fixed4_div_fp
__kmpc_atomic_fixed4_div_rev
__kmpc_atomic_fixed4_eqv
__kmpc_atomic_fixed4_eqv_cpt
__kmpc_atomic_fixed4_max
__kmpc_atomic_fixed4_max_cpt
__kmpc_atomic_fixed4_min
__kmpc_atomic_fixed4_min_cpt
__kmpc_atomic_fixed4_mul
__kmpc_atomic_fixed4_mul_cpt
__kmpc_atomic_fixed4_mul_float8
__kmpc_atomic_fixed4_mul_fp
__kmpc_atomic_fixed4_neqv
__kmpc_atomic_fixed4_neqv_cpt
__kmpc_atomic_fixed4_orb
__kmpc_atomic_fixed4_orb_cpt
__kmpc_atomic_fixed4_orl
__kmpc_atomic_fixed4_orl_cpt
__kmpc_atomic_fixed4_rd
__kmpc_atomic_fixed4_shl
__kmpc_atomic_fixed4_shl_cpt
__kmpc_atomic_fixed4_shl_cpt_rev
__kmpc_atomic_fixed4_shl_rev
__kmpc_atomic_fixed4_shr
__kmpc_atomic_fixed4_shr_cpt
__kmpc_atomic_fixed4_shr_cpt_rev
__kmpc_atomic_fixed4_shr_rev
__kmpc_atomic_fixed4_sub
__kmpc_atomic_fixed4_sub_cpt
__kmpc_atomic_fixed4_sub_cpt_rev
__kmpc_atomic_fixed4_sub_fp
__kmpc_atomic_fixed4_sub_rev
__kmpc_atomic_fixed4_swp
__kmpc_atomic_fixed4_wr
__kmpc_atomic_fixed4_xor
__kmpc_atomic_fixed4_xor_cpt
__kmpc_atomic_fixed4u_div
__kmpc_atomic_fixed4u_div_cpt
__kmpc_atomic_fixed4u_div_cpt_rev
__kmpc_atomic_fixed4u_div_fp
__kmpc_atomic_fixed4u_div_rev
__kmpc_atomic_fixed4u_shr
__kmpc_atomic_fixed4u_shr_cpt
__kmpc_atomic_fixed4u_shr_cpt_rev
__kmpc_atomic_fixed4u_shr_rev
__kmpc_atomic_fixed8_add
__kmpc_atomic_fixed8_add_cpt
__kmpc_atomic_fixed8_add_fp
__kmpc_atomic_fixed8_andb
__kmpc_atomic_fixed8_andb_cpt
__kmpc_atomic_fixed8_andl
__kmpc_atomic_fixed8_andl_cpt
__kmpc_atomic_fixed8_div
__kmpc_atomic_fixed8_div_cpt
__kmpc_atomic_fixed8_div_cpt_rev
__kmpc_atomic_fixed8_div_float8
__kmpc_atomic_fixed8_div_fp
__kmpc_atomic_fixed8_div_rev
__kmpc_atomic_fixed8_eqv
__kmpc_atomic_fixed8_eqv_cpt
__kmpc_atomic_fixed8_max
__kmpc_atomic_fixed8_max_cpt
__kmpc_atomic_fixed8_min
__kmpc_atomic_fixed8_min_cpt
__kmpc_atomic_fixed8_mul
__kmpc_atomic_fixed8_mul_cpt
__kmpc_atomic_fixed8_mul_float8
__kmpc_atomic_fixed8_mul_fp
__kmpc_atomic_fixed8_neqv
__kmpc_atomic_fixed8_neqv_cpt
__kmpc_atomic_fixed8_orb
__kmpc_atomic_fixed8_orb_cpt
__kmpc_atomic_fixed8_orl
__kmpc_atomic_fixed8_orl_cpt
__kmpc_atomic_fixed8_rd
__kmpc_atomic_fixed8_shl
__kmpc_atomic_fixed8_shl_cpt
__kmpc_atomic_fixed8_shl_cpt_rev
__kmpc_atomic_fixed8_shl_rev
__kmpc_atomic_fixed8_shr
__kmpc_atomic_fixed8_shr_cpt
__kmpc_atomic_fixed8_shr_cpt_rev
__kmpc_atomic_fixed8_shr_rev
__kmpc_atomic_fixed8_sub
__kmpc_atomic_fixed8_sub_cpt
__kmpc_atomic_fixed8_sub_cpt_rev
__kmpc_atomic_fixed8_sub_fp
__kmpc_atomic_fixed8_sub_rev
__kmpc_atomic_fixed8_swp
__kmpc_atomic_fixed8_wr
__kmpc_atomic_fixed8_xor
__kmpc_atomic_fixed8_xor_cpt
__kmpc_atomic_fixed8u_div
__kmpc_atomic_fixed8u_div_cpt
__kmpc_atomic_fixed8u_div_cpt_rev
__kmpc_atomic_fixed8u_div_fp
__kmpc_atomic_fixed8u_div_rev
__kmpc_atomic_fixed8u_shr
__kmpc_atomic_fixed8u_shr_cpt
__kmpc_atomic_fixed8u_shr_cpt_rev
__kmpc_atomic_fixed8u_shr_rev
@endcode
Functions for floating point
----------------------------
There are versions here for floating point numbers of size 4, 8, 10 and 16 bytes.
(Ten byte floats are used by X87, but are now rare).
@code
__kmpc_atomic_float4_add
__kmpc_atomic_float4_add_cpt
__kmpc_atomic_float4_add_float8
__kmpc_atomic_float4_add_fp
__kmpc_atomic_float4_div
__kmpc_atomic_float4_div_cpt
__kmpc_atomic_float4_div_cpt_rev
__kmpc_atomic_float4_div_float8
__kmpc_atomic_float4_div_fp
__kmpc_atomic_float4_div_rev
__kmpc_atomic_float4_max
__kmpc_atomic_float4_max_cpt
__kmpc_atomic_float4_min
__kmpc_atomic_float4_min_cpt
__kmpc_atomic_float4_mul
__kmpc_atomic_float4_mul_cpt
__kmpc_atomic_float4_mul_float8
__kmpc_atomic_float4_mul_fp
__kmpc_atomic_float4_rd
__kmpc_atomic_float4_sub
__kmpc_atomic_float4_sub_cpt
__kmpc_atomic_float4_sub_cpt_rev
__kmpc_atomic_float4_sub_float8
__kmpc_atomic_float4_sub_fp
__kmpc_atomic_float4_sub_rev
__kmpc_atomic_float4_swp
__kmpc_atomic_float4_wr
__kmpc_atomic_float8_add
__kmpc_atomic_float8_add_cpt
__kmpc_atomic_float8_add_fp
__kmpc_atomic_float8_div
__kmpc_atomic_float8_div_cpt
__kmpc_atomic_float8_div_cpt_rev
__kmpc_atomic_float8_div_fp
__kmpc_atomic_float8_div_rev
__kmpc_atomic_float8_max
__kmpc_atomic_float8_max_cpt
__kmpc_atomic_float8_min
__kmpc_atomic_float8_min_cpt
__kmpc_atomic_float8_mul
__kmpc_atomic_float8_mul_cpt
__kmpc_atomic_float8_mul_fp
__kmpc_atomic_float8_rd
__kmpc_atomic_float8_sub
__kmpc_atomic_float8_sub_cpt
__kmpc_atomic_float8_sub_cpt_rev
__kmpc_atomic_float8_sub_fp
__kmpc_atomic_float8_sub_rev
__kmpc_atomic_float8_swp
__kmpc_atomic_float8_wr
__kmpc_atomic_float10_add
__kmpc_atomic_float10_add_cpt
__kmpc_atomic_float10_add_fp
__kmpc_atomic_float10_div
__kmpc_atomic_float10_div_cpt
__kmpc_atomic_float10_div_cpt_rev
__kmpc_atomic_float10_div_fp
__kmpc_atomic_float10_div_rev
__kmpc_atomic_float10_mul
__kmpc_atomic_float10_mul_cpt
__kmpc_atomic_float10_mul_fp
__kmpc_atomic_float10_rd
__kmpc_atomic_float10_sub
__kmpc_atomic_float10_sub_cpt
__kmpc_atomic_float10_sub_cpt_rev
__kmpc_atomic_float10_sub_fp
__kmpc_atomic_float10_sub_rev
__kmpc_atomic_float10_swp
__kmpc_atomic_float10_wr
__kmpc_atomic_float16_add
__kmpc_atomic_float16_add_cpt
__kmpc_atomic_float16_div
__kmpc_atomic_float16_div_cpt
__kmpc_atomic_float16_div_cpt_rev
__kmpc_atomic_float16_div_rev
__kmpc_atomic_float16_max
__kmpc_atomic_float16_max_cpt
__kmpc_atomic_float16_min
__kmpc_atomic_float16_min_cpt
__kmpc_atomic_float16_mul
__kmpc_atomic_float16_mul_cpt
__kmpc_atomic_float16_rd
__kmpc_atomic_float16_sub
__kmpc_atomic_float16_sub_cpt
__kmpc_atomic_float16_sub_cpt_rev
__kmpc_atomic_float16_sub_rev
__kmpc_atomic_float16_swp
__kmpc_atomic_float16_wr
@endcode
Functions for Complex types
---------------------------
Functions for complex types whose component floating point variables are of size 4,8,10 or 16 bytes.
The names here are based on the size of the component float, *not* the size of the complex type. So
`__kmpc_atomic_cmplx8_add` is an operation on a `complex<double>` or `complex(kind=8)`, *not* `complex<float>`.
@code
__kmpc_atomic_cmplx4_add
__kmpc_atomic_cmplx4_add_cmplx8
__kmpc_atomic_cmplx4_add_cpt
__kmpc_atomic_cmplx4_div
__kmpc_atomic_cmplx4_div_cmplx8
__kmpc_atomic_cmplx4_div_cpt
__kmpc_atomic_cmplx4_div_cpt_rev
__kmpc_atomic_cmplx4_div_rev
__kmpc_atomic_cmplx4_mul
__kmpc_atomic_cmplx4_mul_cmplx8
__kmpc_atomic_cmplx4_mul_cpt
__kmpc_atomic_cmplx4_rd
__kmpc_atomic_cmplx4_sub
__kmpc_atomic_cmplx4_sub_cmplx8
__kmpc_atomic_cmplx4_sub_cpt
__kmpc_atomic_cmplx4_sub_cpt_rev
__kmpc_atomic_cmplx4_sub_rev
__kmpc_atomic_cmplx4_swp
__kmpc_atomic_cmplx4_wr
__kmpc_atomic_cmplx8_add
__kmpc_atomic_cmplx8_add_cpt
__kmpc_atomic_cmplx8_div
__kmpc_atomic_cmplx8_div_cpt
__kmpc_atomic_cmplx8_div_cpt_rev
__kmpc_atomic_cmplx8_div_rev
__kmpc_atomic_cmplx8_mul
__kmpc_atomic_cmplx8_mul_cpt
__kmpc_atomic_cmplx8_rd
__kmpc_atomic_cmplx8_sub
__kmpc_atomic_cmplx8_sub_cpt
__kmpc_atomic_cmplx8_sub_cpt_rev
__kmpc_atomic_cmplx8_sub_rev
__kmpc_atomic_cmplx8_swp
__kmpc_atomic_cmplx8_wr
__kmpc_atomic_cmplx10_add
__kmpc_atomic_cmplx10_add_cpt
__kmpc_atomic_cmplx10_div
__kmpc_atomic_cmplx10_div_cpt
__kmpc_atomic_cmplx10_div_cpt_rev
__kmpc_atomic_cmplx10_div_rev
__kmpc_atomic_cmplx10_mul
__kmpc_atomic_cmplx10_mul_cpt
__kmpc_atomic_cmplx10_rd
__kmpc_atomic_cmplx10_sub
__kmpc_atomic_cmplx10_sub_cpt
__kmpc_atomic_cmplx10_sub_cpt_rev
__kmpc_atomic_cmplx10_sub_rev
__kmpc_atomic_cmplx10_swp
__kmpc_atomic_cmplx10_wr
__kmpc_atomic_cmplx16_add
__kmpc_atomic_cmplx16_add_cpt
__kmpc_atomic_cmplx16_div
__kmpc_atomic_cmplx16_div_cpt
__kmpc_atomic_cmplx16_div_cpt_rev
__kmpc_atomic_cmplx16_div_rev
__kmpc_atomic_cmplx16_mul
__kmpc_atomic_cmplx16_mul_cpt
__kmpc_atomic_cmplx16_rd
__kmpc_atomic_cmplx16_sub
__kmpc_atomic_cmplx16_sub_cpt
__kmpc_atomic_cmplx16_sub_cpt_rev
__kmpc_atomic_cmplx16_swp
__kmpc_atomic_cmplx16_wr
@endcode
*/
/*!
@ingroup ATOMIC_OPS
@{
*/
/*
* Global vars
*/
// Atomic implementation mode, fixed at build time: 1 = native (Intel) paths,
// 2 = GOMP-compatible locking (see OP_GOMP_CRITICAL further down, which tests
// this value at runtime when built with KMP_GOMP_COMPAT).
#ifndef KMP_GOMP_COMPAT
int __kmp_atomic_mode = 1; // Intel perf
#else
int __kmp_atomic_mode = 2; // GOMP compatibility
#endif /* KMP_GOMP_COMPAT */
// One lock per operand size/kind, used by the critical-section fallback paths.
// KMP_ALIGN(128) applies to the first lock only; presumably it keeps the hot
// Gnu-compat lock on its own cache line — TODO confirm whether the others
// should be padded too.
KMP_ALIGN(128)
kmp_atomic_lock_t __kmp_atomic_lock; /* Control access to all user coded atomics in Gnu compat mode */
kmp_atomic_lock_t __kmp_atomic_lock_1i; /* Control access to all user coded atomics for 1-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_2i; /* Control access to all user coded atomics for 2-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_4i; /* Control access to all user coded atomics for 4-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_4r; /* Control access to all user coded atomics for kmp_real32 data type */
kmp_atomic_lock_t __kmp_atomic_lock_8i; /* Control access to all user coded atomics for 8-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_8r; /* Control access to all user coded atomics for kmp_real64 data type */
kmp_atomic_lock_t __kmp_atomic_lock_8c; /* Control access to all user coded atomics for complex byte data type */
kmp_atomic_lock_t __kmp_atomic_lock_10r; /* Control access to all user coded atomics for long double data type */
kmp_atomic_lock_t __kmp_atomic_lock_16r; /* Control access to all user coded atomics for _Quad data type */
kmp_atomic_lock_t __kmp_atomic_lock_16c; /* Control access to all user coded atomics for double complex data type*/
kmp_atomic_lock_t __kmp_atomic_lock_20c; /* Control access to all user coded atomics for long double complex type*/
kmp_atomic_lock_t __kmp_atomic_lock_32c; /* Control access to all user coded atomics for _Quad complex data type */
/*
2007-03-02:
Without "volatile" specifier in OP_CMPXCHG and MIN_MAX_CMPXCHG we have a
bug on *_32 and *_32e. This is just a temporary workaround for the problem.
It seems the right solution is writing OP_CMPXCHG and MIN_MAX_CMPXCHG
routines in assembler language.
*/
#define KMP_ATOMIC_VOLATILE volatile
// Convenience operators so the generic macros below can apply +=, -=, *=, /=,
// <, > directly to the aligned _Quad / quad-complex wrapper structs (which
// store the payload in their .q member).
#if ( KMP_ARCH_X86 ) && KMP_HAVE_QUAD
static inline void operator +=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q /= rhs.q; };
static inline bool operator < ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q < rhs.q; }
static inline bool operator > ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q > rhs.q; }
static inline void operator +=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q /= rhs.q; };
static inline bool operator < ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q < rhs.q; }
static inline bool operator > ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q > rhs.q; }
static inline void operator +=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q /= rhs.q; };
static inline void operator +=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q /= rhs.q; };
#endif
/* ------------------------------------------------------------------------ */
/* ATOMIC implementation routines */
/* one routine for each operation and operand type */
/* ------------------------------------------------------------------------ */
// All routines declarations looks like
// void __kmpc_atomic_RTYPE_OP( ident_t*, int, TYPE *lhs, TYPE rhs );
// ------------------------------------------------------------------------
#define KMP_CHECK_GTID \
if ( gtid == KMP_GTID_UNKNOWN ) { \
gtid = __kmp_entry_gtid(); \
} // check and get gtid when needed
// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
// NOTE: this macro opens the function body ('{'); the matching closing '}'
// is supplied by whichever ATOMIC_* macro invokes ATOMIC_BEGIN.
#define ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
KMP_DEBUG_ASSERT( __kmp_init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
// Lock variables used for critical sections for various size operands
// (token-pasted onto "ATOMIC_LOCK" by OP_CRITICAL via its LCK_ID argument)
#define ATOMIC_LOCK0 __kmp_atomic_lock // all types, for Gnu compat
#define ATOMIC_LOCK1i __kmp_atomic_lock_1i // char
#define ATOMIC_LOCK2i __kmp_atomic_lock_2i // short
#define ATOMIC_LOCK4i __kmp_atomic_lock_4i // long int
#define ATOMIC_LOCK4r __kmp_atomic_lock_4r // float
#define ATOMIC_LOCK8i __kmp_atomic_lock_8i // long long int
#define ATOMIC_LOCK8r __kmp_atomic_lock_8r // double
#define ATOMIC_LOCK8c __kmp_atomic_lock_8c // float complex
#define ATOMIC_LOCK10r __kmp_atomic_lock_10r // long double
#define ATOMIC_LOCK16r __kmp_atomic_lock_16r // _Quad
#define ATOMIC_LOCK16c __kmp_atomic_lock_16c // double complex
#define ATOMIC_LOCK20c __kmp_atomic_lock_20c // long double complex
#define ATOMIC_LOCK32c __kmp_atomic_lock_32c // _Quad complex
// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
(*lhs) OP (rhs); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// ------------------------------------------------------------------------
// For GNU compatibility, we may need to use a critical section,
// even though it is not required by the ISA.
//
// On IA-32 architecture, all atomic operations except for fixed 4 byte add,
// sub, and bitwise logical ops, and 1 & 2 byte logical ops use a common
// critical section. On Intel(R) 64, all atomic operations are done with fetch
// and add or compare and exchange. Therefore, the FLAG parameter to this
// macro is either KMP_ARCH_X86 or 0 (or 1, for Intel-specific extension which
// require a critical section, where we predict that they will be implemented
// in the Gnu codegen by calling GOMP_atomic_start() / GOMP_atomic_end()).
//
// When the OP_GOMP_CRITICAL macro is used in a *CRITICAL* macro construct,
// the FLAG parameter should always be 1. If we know that we will be using
// a critical section, then we want to make certain that we use the generic
// lock __kmp_atomic_lock to protect the atomic update, and not one of the
// locks that are specialized based upon the size or type of the data.
//
// If FLAG is 0, then we are relying on dead code elimination by the build
// compiler to get rid of the useless block of code, and save a needless
// branch at runtime.
//
#ifdef KMP_GOMP_COMPAT
// Early-out into the generic critical section (lock 0) when running in GOMP
// compatibility mode; expands to nothing in non-compat builds so the fast
// path below it is unconditional.
# define OP_GOMP_CRITICAL(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL( OP, 0 ); \
return; \
}
# else
# define OP_GOMP_CRITICAL(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// Spin back-off used inside the compare-and-store retry loops below.
#if KMP_MIC
# define KMP_DO_PAUSE _mm_delay_32( 1 )
#else
# define KMP_DO_PAUSE KMP_CPU_PAUSE()
#endif /* KMP_MIC */
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// CAS retry loop: snapshot *lhs, compute (old OP rhs), then try to publish
// with compare-and-store; on failure, pause and retry with a fresh snapshot.
// The bit-pattern casts through kmp_int##BITS let the same loop serve both
// integer and floating-point TYPEs of matching width.
#define OP_CMPXCHG(TYPE,BITS,OP) \
{ \
TYPE old_value, new_value; \
old_value = *(TYPE volatile *)lhs; \
new_value = old_value OP rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_DO_PAUSE; \
\
old_value = *(TYPE volatile *)lhs; \
new_value = old_value OP rhs; \
} \
}
#if USE_CMPXCHG_FIX
// 2007-06-25:
// workaround for C78287 (complex(kind=4) data type)
// lin_32, lin_32e, win_32 and win_32e are affected (I verified the asm)
// Compiler ignores the volatile qualifier of the temp_val in the OP_CMPXCHG macro.
// This is a problem of the compiler.
// Related tracker is C76005, targeted to 11.0.
// I verified the asm of the workaround.
// Same retry loop as OP_CMPXCHG, but the temporaries are reached through a
// pointer member ('vvv') to defeat the compiler bug above.  'anonym' is a
// compile-time size check: its length is 0 (an error) unless
// sizeof(TYPE) == sizeof(kmp_int##BITS).
#define OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \
{ \
char anonym[ ( sizeof( TYPE ) == sizeof( kmp_int##BITS ) ) ? ( 1 ) : ( 0 ) ] = { 1 }; \
struct _sss { \
TYPE cmp; \
kmp_int##BITS *vvv; \
}; \
struct _sss old_value, new_value; \
old_value.vvv = ( kmp_int##BITS * )&old_value.cmp; \
new_value.vvv = ( kmp_int##BITS * )&new_value.cmp; \
*old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \
new_value.cmp = old_value.cmp OP rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \
*VOLATILE_CAST(kmp_int##BITS *) new_value.vvv ) ) \
{ \
KMP_DO_PAUSE; \
\
*old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \
new_value.cmp = old_value.cmp OP rhs; \
} \
}
// end of the first part of the workaround for C78287
#endif // USE_CMPXCHG_FIX
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
// Each macro expands (via ATOMIC_BEGIN) into a complete __kmpc_atomic_*
// function; the trailing '}' closes the body that ATOMIC_BEGIN opens.
// LCK_ID and MASK are unused on x86 (unaligned access is legal here); the
// non-x86 branch below tests MASK and falls back to a critical section.
#define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
/* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG(TYPE,BITS,OP) \
}
#if USE_CMPXCHG_FIX
// -------------------------------------------------------------------------
// workaround for C78287 (complex(kind=4) data type)
#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \
}
// end of the second part of the workaround for C78287
#endif
#else
// -------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
// An address passing the MASK alignment test takes the lock-free path;
// otherwise the operation is serialized under the per-size lock (LCK_ID).
#define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
/* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
#if USE_CMPXCHG_FIX
// -------------------------------------------------------------------------
// workaround for C78287 (complex(kind=4) data type)
#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
// end of the second part of the workaround for C78287
#endif // USE_CMPXCHG_FIX
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// Each invocation below generates one __kmpc_atomic_* entry point (named in
// the trailing comment).  Arguments: TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID,
// MASK, GOMP_FLAG — see the parameter legend that follows this group.
// Routines for ATOMIC 4-byte operands addition and subtraction
ATOMIC_FIXED_ADD( fixed4, add, kmp_int32, 32, +, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add
ATOMIC_FIXED_ADD( fixed4, sub, kmp_int32, 32, -, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub
ATOMIC_CMPXCHG( float4, add, kmp_real32, 32, +, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add
ATOMIC_CMPXCHG( float4, sub, kmp_real32, 32, -, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub
// Routines for ATOMIC 8-byte operands addition and subtraction
ATOMIC_FIXED_ADD( fixed8, add, kmp_int64, 64, +, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add
ATOMIC_FIXED_ADD( fixed8, sub, kmp_int64, 64, -, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub
ATOMIC_CMPXCHG( float8, add, kmp_real64, 64, +, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add
ATOMIC_CMPXCHG( float8, sub, kmp_real64, 64, -, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub
// ------------------------------------------------------------------------
// Entries definition for integer operands
// TYPE_ID - operands type and size (fixed4, float4)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operand type
// BITS - size in bits, used to distinguish low level calls
// OP - operator (used in critical section)
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// MASK - used for alignment check
// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,MASK,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// ------------------------------------------------------------------------
// TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG
// Remaining integer and float update entry points, one per (type, op) pair.
// GOMP_FLAG is KMP_ARCH_X86 where the Gnu-compat build needs the critical
// section on IA-32, and 0 where the op is always handled lock-free.
ATOMIC_CMPXCHG( fixed1, add, kmp_int8, 8, +, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add
ATOMIC_CMPXCHG( fixed1, andb, kmp_int8, 8, &, 1i, 0, 0 ) // __kmpc_atomic_fixed1_andb
ATOMIC_CMPXCHG( fixed1, div, kmp_int8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div
ATOMIC_CMPXCHG( fixed1u, div, kmp_uint8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div
ATOMIC_CMPXCHG( fixed1, mul, kmp_int8, 8, *, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul
ATOMIC_CMPXCHG( fixed1, orb, kmp_int8, 8, |, 1i, 0, 0 ) // __kmpc_atomic_fixed1_orb
ATOMIC_CMPXCHG( fixed1, shl, kmp_int8, 8, <<, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl
ATOMIC_CMPXCHG( fixed1, shr, kmp_int8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr
ATOMIC_CMPXCHG( fixed1u, shr, kmp_uint8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr
ATOMIC_CMPXCHG( fixed1, sub, kmp_int8, 8, -, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub
ATOMIC_CMPXCHG( fixed1, xor, kmp_int8, 8, ^, 1i, 0, 0 ) // __kmpc_atomic_fixed1_xor
ATOMIC_CMPXCHG( fixed2, add, kmp_int16, 16, +, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add
ATOMIC_CMPXCHG( fixed2, andb, kmp_int16, 16, &, 2i, 1, 0 ) // __kmpc_atomic_fixed2_andb
ATOMIC_CMPXCHG( fixed2, div, kmp_int16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div
ATOMIC_CMPXCHG( fixed2u, div, kmp_uint16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div
ATOMIC_CMPXCHG( fixed2, mul, kmp_int16, 16, *, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul
ATOMIC_CMPXCHG( fixed2, orb, kmp_int16, 16, |, 2i, 1, 0 ) // __kmpc_atomic_fixed2_orb
ATOMIC_CMPXCHG( fixed2, shl, kmp_int16, 16, <<, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl
ATOMIC_CMPXCHG( fixed2, shr, kmp_int16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr
ATOMIC_CMPXCHG( fixed2u, shr, kmp_uint16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr
ATOMIC_CMPXCHG( fixed2, sub, kmp_int16, 16, -, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub
ATOMIC_CMPXCHG( fixed2, xor, kmp_int16, 16, ^, 2i, 1, 0 ) // __kmpc_atomic_fixed2_xor
ATOMIC_CMPXCHG( fixed4, andb, kmp_int32, 32, &, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andb
ATOMIC_CMPXCHG( fixed4, div, kmp_int32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div
ATOMIC_CMPXCHG( fixed4u, div, kmp_uint32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div
ATOMIC_CMPXCHG( fixed4, mul, kmp_int32, 32, *, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul
ATOMIC_CMPXCHG( fixed4, orb, kmp_int32, 32, |, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orb
ATOMIC_CMPXCHG( fixed4, shl, kmp_int32, 32, <<, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl
ATOMIC_CMPXCHG( fixed4, shr, kmp_int32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr
ATOMIC_CMPXCHG( fixed4u, shr, kmp_uint32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr
ATOMIC_CMPXCHG( fixed4, xor, kmp_int32, 32, ^, 4i, 3, 0 ) // __kmpc_atomic_fixed4_xor
ATOMIC_CMPXCHG( fixed8, andb, kmp_int64, 64, &, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb
ATOMIC_CMPXCHG( fixed8, div, kmp_int64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div
ATOMIC_CMPXCHG( fixed8u, div, kmp_uint64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div
ATOMIC_CMPXCHG( fixed8, mul, kmp_int64, 64, *, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul
ATOMIC_CMPXCHG( fixed8, orb, kmp_int64, 64, |, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb
ATOMIC_CMPXCHG( fixed8, shl, kmp_int64, 64, <<, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl
ATOMIC_CMPXCHG( fixed8, shr, kmp_int64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr
ATOMIC_CMPXCHG( fixed8u, shr, kmp_uint64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr
ATOMIC_CMPXCHG( fixed8, xor, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor
ATOMIC_CMPXCHG( float4, div, kmp_real32, 32, /, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div
ATOMIC_CMPXCHG( float4, mul, kmp_real32, 32, *, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul
ATOMIC_CMPXCHG( float8, div, kmp_real64, 64, /, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div
ATOMIC_CMPXCHG( float8, mul, kmp_real64, 64, *, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul
// TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG
/* ------------------------------------------------------------------------ */
/* Routines for C/C++ Reduction operators && and || */
/* ------------------------------------------------------------------------ */
// ------------------------------------------------------------------------
// Need separate macros for &&, || because there is no combined assignment
// TODO: eliminate ATOMIC_CRIT_{L,EQV} macros as not used
// The "= *lhs OP" token sequence is spliced by OP_GOMP_CRITICAL/OP_CRITICAL
// so the expanded statement becomes "(*lhs) = *lhs OP (rhs)" — presumably;
// confirm against those macros' definitions earlier in the file.
#define ATOMIC_CRIT_L(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \
OP_CRITICAL( = *lhs OP, LCK_ID ) \
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ===================================
#define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \
OP_CMPXCHG(TYPE,BITS,OP) \
}
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
// MASK is the hex low-address-bit mask (e.g. 3 for 4-byte operands); a
// non-zero "lhs & MASK" means the operand is misaligned for atomic CAS.
#define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(= *lhs OP,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(= *lhs OP,LCK_ID) /* unaligned - use critical */ \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// Logical && / || reduction entry points (Fortran .AND. / .OR. also map here).
ATOMIC_CMPX_L( fixed1, andl, char, 8, &&, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl
ATOMIC_CMPX_L( fixed1, orl, char, 8, ||, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl
ATOMIC_CMPX_L( fixed2, andl, short, 16, &&, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl
ATOMIC_CMPX_L( fixed2, orl, short, 16, ||, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl
ATOMIC_CMPX_L( fixed4, andl, kmp_int32, 32, &&, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andl
ATOMIC_CMPX_L( fixed4, orl, kmp_int32, 32, ||, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orl
ATOMIC_CMPX_L( fixed8, andl, kmp_int64, 64, &&, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl
ATOMIC_CMPX_L( fixed8, orl, kmp_int64, 64, ||, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl
/* ------------------------------------------------------------------------- */
/* Routines for Fortran operators that matched no one in C: */
/* MAX, MIN, .EQV., .NEQV. */
/* Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl} */
/* Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor} */
/* ------------------------------------------------------------------------- */
// -------------------------------------------------------------------------
// MIN and MAX need separate macros
// OP - operator to check if we need any actions?
// Under the lock: store rhs only if the "*lhs OP rhs" predicate still holds
// (the predicate is re-evaluated here because the caller's pre-check was
// done without synchronization).
#define MIN_MAX_CRITSECT(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if ( *lhs OP rhs ) { /* still need actions? */ \
*lhs = rhs; \
} \
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_MIN_MAX_CRITSECT(OP,FLAG) \
if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \
KMP_CHECK_GTID; \
MIN_MAX_CRITSECT( OP, 0 ); \
return; \
}
#else
#define GOMP_MIN_MAX_CRITSECT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// -------------------------------------------------------------------------
// Lock-free path: CAS-loop that retries while the predicate holds and the
// compare-and-store loses the race; exits as soon as another thread has
// stored a value that makes the update unnecessary.
#define MIN_MAX_CMPXCHG(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value; \
temp_val = *lhs; \
old_value = temp_val; \
while ( old_value OP rhs && /* still need actions? */ \
! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \
{ \
KMP_CPU_PAUSE(); \
temp_val = *lhs; \
old_value = temp_val; \
} \
}
// -------------------------------------------------------------------------
// 1-byte, 2-byte operands - use critical section
// Note: the initial "if ( *lhs OP rhs )" is an unsynchronized fast-path
// check (benign race) — the real decision is repeated under the lock.
#define MIN_MAX_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
if ( *lhs OP rhs ) { /* need actions? */ \
GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \
MIN_MAX_CRITSECT(OP,LCK_ID) \
} \
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// -------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
if ( *lhs OP rhs ) { \
GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \
MIN_MAX_CMPXCHG(TYPE,BITS,OP) \
} \
}
#else
// -------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
if ( *lhs OP rhs ) { \
GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
MIN_MAX_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
MIN_MAX_CRITSECT(OP,LCK_ID) /* unaligned address */ \
} \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// MIN/MAX entry points.  OP is the "update needed?" predicate, not the
// reduction itself: max passes "<" (store rhs when *lhs < rhs), min passes ">".
MIN_MAX_COMPXCHG( fixed1, max, char, 8, <, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max
MIN_MAX_COMPXCHG( fixed1, min, char, 8, >, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min
MIN_MAX_COMPXCHG( fixed2, max, short, 16, <, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max
MIN_MAX_COMPXCHG( fixed2, min, short, 16, >, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min
MIN_MAX_COMPXCHG( fixed4, max, kmp_int32, 32, <, 4i, 3, 0 ) // __kmpc_atomic_fixed4_max
MIN_MAX_COMPXCHG( fixed4, min, kmp_int32, 32, >, 4i, 3, 0 ) // __kmpc_atomic_fixed4_min
MIN_MAX_COMPXCHG( fixed8, max, kmp_int64, 64, <, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max
MIN_MAX_COMPXCHG( fixed8, min, kmp_int64, 64, >, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min
MIN_MAX_COMPXCHG( float4, max, kmp_real32, 32, <, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max
MIN_MAX_COMPXCHG( float4, min, kmp_real32, 32, >, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min
MIN_MAX_COMPXCHG( float8, max, kmp_real64, 64, <, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max
MIN_MAX_COMPXCHG( float8, min, kmp_real64, 64, >, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min
#if KMP_HAVE_QUAD
// 16-byte quad precision always uses the critical-section form.
MIN_MAX_CRITICAL( float16, max, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max
MIN_MAX_CRITICAL( float16, min, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min
#if ( KMP_ARCH_X86 )
MIN_MAX_CRITICAL( float16, max_a16, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16
MIN_MAX_CRITICAL( float16, min_a16, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16
#endif
#endif
// ------------------------------------------------------------------------
// Need separate macros for .EQV. because of the need of complement (~)
// OP ignored for critical sections, ^=~ used instead
#define ATOMIC_CRIT_EQV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL(^=~,LCK_ID) /* send assignment and complement */ \
}
// ------------------------------------------------------------------------
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ===================================
#define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \
OP_CMPXCHG(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(^=~,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(^=~,LCK_ID) /* unaligned address - use critical */ \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// .NEQV. is plain XOR, so it reuses the generic ATOMIC_CMPXCHG macro;
// .EQV. needs the complemented form ("^~") handled by ATOMIC_CMPX_EQV.
ATOMIC_CMPXCHG( fixed1, neqv, kmp_int8, 8, ^, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv
ATOMIC_CMPXCHG( fixed2, neqv, kmp_int16, 16, ^, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv
ATOMIC_CMPXCHG( fixed4, neqv, kmp_int32, 32, ^, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv
ATOMIC_CMPXCHG( fixed8, neqv, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv
ATOMIC_CMPX_EQV( fixed1, eqv, kmp_int8, 8, ^~, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv
ATOMIC_CMPX_EQV( fixed2, eqv, kmp_int16, 16, ^~, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv
ATOMIC_CMPX_EQV( fixed4, eqv, kmp_int32, 32, ^~, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv
ATOMIC_CMPX_EQV( fixed8, eqv, kmp_int64, 64, ^~, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// OP##= turns the bare operator into its compound-assignment form
// (e.g. "+" -> "+="), which the inner macros apply as "*lhs OP= rhs".
#define ATOMIC_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \
}
/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL( float10, add, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add
ATOMIC_CRITICAL( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub
ATOMIC_CRITICAL( float10, mul, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul
ATOMIC_CRITICAL( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL( float16, add, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add
ATOMIC_CRITICAL( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub
ATOMIC_CRITICAL( float16, mul, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul
ATOMIC_CRITICAL( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div
#if ( KMP_ARCH_X86 )
// _a16 variants operate on 16-byte-aligned quad operands.
ATOMIC_CRITICAL( float16, add_a16, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16
ATOMIC_CRITICAL( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16
ATOMIC_CRITICAL( float16, mul_a16, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16
ATOMIC_CRITICAL( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16
#endif
#endif
// routines for complex types
#if USE_CMPXCHG_FIX
// workaround for C78287 (complex(kind=4) data type)
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, add, kmp_cmplx32, 64, +, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_add
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, sub, kmp_cmplx32, 64, -, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_sub
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, mul, kmp_cmplx32, 64, *, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_mul
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, div, kmp_cmplx32, 64, /, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_div
// end of the workaround for C78287
#else
ATOMIC_CRITICAL( cmplx4, add, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add
ATOMIC_CRITICAL( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub
ATOMIC_CRITICAL( cmplx4, mul, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul
ATOMIC_CRITICAL( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div
#endif // USE_CMPXCHG_FIX
ATOMIC_CRITICAL( cmplx8, add, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add
ATOMIC_CRITICAL( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub
ATOMIC_CRITICAL( cmplx8, mul, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul
ATOMIC_CRITICAL( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div
ATOMIC_CRITICAL( cmplx10, add, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add
ATOMIC_CRITICAL( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub
ATOMIC_CRITICAL( cmplx10, mul, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul
ATOMIC_CRITICAL( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL( cmplx16, add, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add
ATOMIC_CRITICAL( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub
ATOMIC_CRITICAL( cmplx16, mul, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul
ATOMIC_CRITICAL( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL( cmplx16, add_a16, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16
ATOMIC_CRITICAL( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16
ATOMIC_CRITICAL( cmplx16, mul_a16, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16
ATOMIC_CRITICAL( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16
#endif
#endif
#if OMP_40_ENABLED
// OpenMP 4.0: x = expr binop x for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// Reverse form: the operand order is swapped vs. the forward routines —
// the result is "rhs OP *lhs", not "*lhs OP rhs".
#define OP_CRITICAL_REV(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
(*lhs) = (rhs) OP (*lhs); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_REV(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_REV( OP, 0 ); \
return; \
}
#else
#define OP_GOMP_CRITICAL_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
#define ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_rev( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
KMP_DEBUG_ASSERT( __kmp_init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_rev: T#%d\n", gtid ));
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
#define OP_CMPXCHG_REV(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs OP old_value; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_DO_PAUSE; \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs OP old_value; \
} \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \
OP_CMPXCHG_REV(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// Entries definition for integer operands
// TYPE_ID - operands type and size (fixed4, float4)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operand type
// BITS - size in bits, used to distinguish low level calls
// OP - operator (used in critical section)
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// ------------------------------------------------------------------------
// Only non-commutative operators (sub, div, shifts) need _rev entries.
// TYPE_ID,OP_ID, TYPE, BITS, OP, LCK_ID, GOMP_FLAG
ATOMIC_CMPXCHG_REV( fixed1, div, kmp_int8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev
ATOMIC_CMPXCHG_REV( fixed1u, div, kmp_uint8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev
ATOMIC_CMPXCHG_REV( fixed1, shl, kmp_int8, 8, <<, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_rev
ATOMIC_CMPXCHG_REV( fixed1, shr, kmp_int8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_rev
ATOMIC_CMPXCHG_REV( fixed1u, shr, kmp_uint8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_rev
ATOMIC_CMPXCHG_REV( fixed1, sub, kmp_int8, 8, -, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev
ATOMIC_CMPXCHG_REV( fixed2, div, kmp_int16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev
ATOMIC_CMPXCHG_REV( fixed2u, div, kmp_uint16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev
ATOMIC_CMPXCHG_REV( fixed2, shl, kmp_int16, 16, <<, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_rev
ATOMIC_CMPXCHG_REV( fixed2, shr, kmp_int16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_rev
ATOMIC_CMPXCHG_REV( fixed2u, shr, kmp_uint16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_rev
ATOMIC_CMPXCHG_REV( fixed2, sub, kmp_int16, 16, -, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev
ATOMIC_CMPXCHG_REV( fixed4, div, kmp_int32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_rev
ATOMIC_CMPXCHG_REV( fixed4u, div, kmp_uint32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_rev
ATOMIC_CMPXCHG_REV( fixed4, shl, kmp_int32, 32, <<, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_rev
ATOMIC_CMPXCHG_REV( fixed4, shr, kmp_int32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_rev
ATOMIC_CMPXCHG_REV( fixed4u, shr, kmp_uint32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_rev
ATOMIC_CMPXCHG_REV( fixed4, sub, kmp_int32, 32, -, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_rev
ATOMIC_CMPXCHG_REV( fixed8, div, kmp_int64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev
ATOMIC_CMPXCHG_REV( fixed8u, div, kmp_uint64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev
ATOMIC_CMPXCHG_REV( fixed8, shl, kmp_int64, 64, <<, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_rev
ATOMIC_CMPXCHG_REV( fixed8, shr, kmp_int64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_rev
ATOMIC_CMPXCHG_REV( fixed8u, shr, kmp_uint64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_rev
ATOMIC_CMPXCHG_REV( fixed8, sub, kmp_int64, 64, -, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev
ATOMIC_CMPXCHG_REV( float4, div, kmp_real32, 32, /, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev
ATOMIC_CMPXCHG_REV( float4, sub, kmp_real32, 32, -, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev
ATOMIC_CMPXCHG_REV( float8, div, kmp_real64, 64, /, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev
ATOMIC_CMPXCHG_REV( float8, sub, kmp_real64, 64, -, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev
// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID, GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \
OP_CRITICAL_REV(OP,LCK_ID) \
}
/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL_REV( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_rev
ATOMIC_CRITICAL_REV( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_rev
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_REV( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_rev
ATOMIC_CRITICAL_REV( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_rev
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_REV( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_rev
ATOMIC_CRITICAL_REV( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_rev
#endif
#endif
// routines for complex types
ATOMIC_CRITICAL_REV( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_rev
ATOMIC_CRITICAL_REV( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_rev
ATOMIC_CRITICAL_REV( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_rev
ATOMIC_CRITICAL_REV( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_rev
ATOMIC_CRITICAL_REV( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_rev
ATOMIC_CRITICAL_REV( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_rev
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_REV( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_rev
ATOMIC_CRITICAL_REV( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_rev
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_REV( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_rev
ATOMIC_CRITICAL_REV( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_rev
#endif
#endif
#endif //KMP_ARCH_X86 || KMP_ARCH_X86_64
// End of OpenMP 4.0: x = expr binop x for non-commutative operations.
#endif //OMP_40_ENABLED
/* ------------------------------------------------------------------------ */
/* Routines for mixed types of LHS and RHS, when RHS is "larger" */
/* Note: in order to reduce the total number of types combinations */
/* it is supposed that compiler converts RHS to longest floating type,*/
/* that is _Quad, before call to any of these routines */
/* Conversion to _Quad will be done by the compiler during calculation, */
/* conversion back to TYPE - before the assignment, like: */
/* *lhs = (TYPE)( (_Quad)(*lhs) OP rhs ) */
/* Performance penalty expected because of SW emulation use */
/* ------------------------------------------------------------------------ */
// Unlike ATOMIC_BEGIN, the rhs parameter here has its own type (RTYPE),
// and RTYPE_ID is appended to the generated function name.
#define ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs ) \
{ \
KMP_DEBUG_ASSERT( __kmp_init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid ));
// -------------------------------------------------------------------------
#define ATOMIC_CRITICAL_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \
}
// -------------------------------------------------------------------------
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// -------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG(TYPE,BITS,OP) \
}
// -------------------------------------------------------------------------
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// RHS=float8
ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_float8
ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_float8
ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_float8
ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_float8
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_float8
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_float8
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_float8
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_float8
// RHS=float16 (deprecated, to be removed when we are sure the compiler does not use them)
#if KMP_HAVE_QUAD
ATOMIC_CMPXCHG_MIX( fixed1, char, add, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_fp
ATOMIC_CMPXCHG_MIX( fixed1, char, sub, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_fp
ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_fp
ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_fp
ATOMIC_CMPXCHG_MIX( fixed1u, uchar, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_fp
ATOMIC_CMPXCHG_MIX( fixed2, short, add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_fp
ATOMIC_CMPXCHG_MIX( fixed2, short, sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_fp
ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_fp
ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_fp
ATOMIC_CMPXCHG_MIX( fixed2u, ushort, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_fp
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, add, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_fp
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, sub, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_fp
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_fp
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_fp
ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_fp
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, add, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_fp
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_fp
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_fp
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_fp
ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_fp
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_fp
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_fp
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_fp
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_fp
ATOMIC_CMPXCHG_MIX( float8, kmp_real64, add, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_fp
ATOMIC_CMPXCHG_MIX( float8, kmp_real64, sub, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_fp
ATOMIC_CMPXCHG_MIX( float8, kmp_real64, mul, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_fp
ATOMIC_CMPXCHG_MIX( float8, kmp_real64, div, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_fp
ATOMIC_CRITICAL_FP( float10, long double, add, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_fp
ATOMIC_CRITICAL_FP( float10, long double, sub, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_fp
ATOMIC_CRITICAL_FP( float10, long double, mul, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_fp
ATOMIC_CRITICAL_FP( float10, long double, div, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_fp
#endif
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#if USE_CMPXCHG_FIX
// workaround for C78287 (complex(kind=4) data type)
// The 8-byte complex(4) value is CAS'd as a single 64-bit quantity via
// OP_CMPXCHG_WORKAROUND instead of the regular OP_CMPXCHG.
#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \
}
// end of the second part of the workaround for C78287
#else
#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG(TYPE,BITS,OP) \
}
#endif // USE_CMPXCHG_FIX
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, add, 64, +, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_add_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, sub, 64, -, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_sub_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, mul, 64, *, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_mul_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, div, 64, /, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_div_cmplx8
// READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
//////////////////////////////////////////////////////////////////////////////////////////////////////
// ------------------------------------------------------------------------
// Atomic READ routines
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// Beginning of a definition (provides name, parameters, gebug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
// Emits the signature and opening brace of an atomic-read entry point:
//   RET_TYPE __kmpc_atomic_<TYPE_ID>_<OP_ID>( ident_t*, int gtid, TYPE *loc )
// The body and closing brace are supplied by the macro that invokes this one.
#define ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * loc ) \
{ \
KMP_DEBUG_ASSERT( __kmp_init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store_ret" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
// TODO: check if it is still necessary
// Return old value regardless of the result of the "compare & swap" operation
// Atomic read implemented as CAS(loc, old, old): storing the value already
// present is a no-op, but the compare-and-store-ret primitive returns the
// current contents atomically.  The union reinterprets the BITS-wide integer
// result as TYPE.  Assigns the result to `new_value` (declared by the
// enclosing ATOMIC_* macro) and returns it.
#define OP_CMPXCHG_READ(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
union f_i_union { \
TYPE f_val; \
kmp_int##BITS i_val; \
}; \
union f_i_union old_value; \
temp_val = *loc; \
old_value.f_val = temp_val; \
old_value.i_val = KMP_COMPARE_AND_STORE_RET##BITS( (kmp_int##BITS *) loc, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val ); \
new_value = old_value.f_val; \
return new_value; \
}
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// Lock-based atomic read: take the per-type lock LCK_ID, copy *loc into
// `new_value` (declared by the enclosing macro), release the lock.
// The OP parameter is unused here; it is kept for macro-signature symmetry.
#define OP_CRITICAL_READ(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
new_value = (*loc); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GNU-OpenMP compatibility path: when __kmp_atomic_mode == 2 (libgomp-style
// serialization), do the read under lock 0 and return early from the
// generated function; otherwise fall through to the native implementation.
#define OP_GOMP_CRITICAL_READ(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_READ( OP, 0 ); \
return new_value; \
}
#else
#define OP_GOMP_CRITICAL_READ(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// -------------------------------------------------------------------------
// Integer atomic read: fetch-and-add of zero (OP is '+', so `OP 0` is +0)
// returns the current value without modifying it.
#define ATOMIC_FIXED_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \
new_value = KMP_TEST_THEN_ADD##BITS( loc, OP 0 ); \
return new_value; \
}
// -------------------------------------------------------------------------
// Floating-point / small-integer atomic read via the CAS-based reader above.
#define ATOMIC_CMPXCHG_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \
OP_CMPXCHG_READ(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_READ(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL_READ(OP,LCK_ID) /* send assignment */ \
return new_value; \
}
// ------------------------------------------------------------------------
// Fix for cmplx4 read (CQ220361) on Windows* OS. Regular routine with return value doesn't work.
// Let's return the read value through the additional parameter.
#if ( KMP_OS_WINDOWS )
// Workaround for CQ220361: on Windows* OS the regular routine that returns a
// cmplx4 by value does not work, so these *_WRK variants return the read
// value through an extra `out` parameter instead of the return value.
#define OP_CRITICAL_READ_WRK(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
(*out) = (*loc); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compatibility read under lock 0; no early return needed because the
// generated function returns void (result already stored through `out`).
#define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_READ_WRK( OP, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// void __kmpc_atomic_<TYPE_ID>_<OP_ID>( TYPE *out, ident_t*, int gtid, TYPE *loc )
// Note the `out` parameter comes FIRST in this workaround signature.
#define ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID( TYPE * out, ident_t *id_ref, int gtid, TYPE * loc ) \
{ \
KMP_DEBUG_ASSERT( __kmp_init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_READ_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \
OP_GOMP_CRITICAL_READ_WRK(OP##=,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL_READ_WRK(OP,LCK_ID) /* send assignment */ \
}
#endif // KMP_OS_WINDOWS
// ------------------------------------------------------------------------
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
// Generate the __kmpc_atomic_*_rd read entry points.  GOMP_FLAG is
// KMP_ARCH_X86 where 32-bit x86 needs the compatibility/critical path.
ATOMIC_FIXED_READ( fixed4, rd, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_rd
ATOMIC_FIXED_READ( fixed8, rd, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_rd
ATOMIC_CMPXCHG_READ( float4, rd, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_rd
ATOMIC_CMPXCHG_READ( float8, rd, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_rd
// !!! TODO: Remove lock operations for "char" since it can't be non-atomic
ATOMIC_CMPXCHG_READ( fixed1, rd, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_rd
ATOMIC_CMPXCHG_READ( fixed2, rd, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_rd
ATOMIC_CRITICAL_READ( float10, rd, long double, +, 10r, 1 ) // __kmpc_atomic_float10_rd
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_READ( float16, rd, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_rd
#endif // KMP_HAVE_QUAD
// Fix for CQ220361 on Windows* OS
#if ( KMP_OS_WINDOWS )
ATOMIC_CRITICAL_READ_WRK( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd
#else
ATOMIC_CRITICAL_READ( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd
#endif
ATOMIC_CRITICAL_READ( cmplx8, rd, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_rd
ATOMIC_CRITICAL_READ( cmplx10, rd, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_rd
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_READ( cmplx16, rd, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_rd
#if ( KMP_ARCH_X86 )
// 16-byte-aligned _Quad / complex-128 variants (IA-32 only).
ATOMIC_CRITICAL_READ( float16, a16_rd, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_a16_rd
ATOMIC_CRITICAL_READ( cmplx16, a16_rd, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_rd
#endif
#endif
// ------------------------------------------------------------------------
// Atomic WRITE routines
// ------------------------------------------------------------------------
// Integer atomic write: a single BITS-wide exchange stores rhs into *lhs.
// The generated function returns void; the exchange's old value is discarded.
#define ATOMIC_XCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
KMP_XCHG_FIXED##BITS( lhs, rhs ); \
}
// ------------------------------------------------------------------------
// Same as above but uses the floating-point exchange primitive.
#define ATOMIC_XCHG_FLOAT_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
KMP_XCHG_REAL##BITS( lhs, rhs ); \
}
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
// Atomic write via CAS loop: retry with KMP_CPU_PAUSE between attempts until
// the store of rhs succeeds against an unchanged *lhs.  temp_val forces a
// single read of *lhs per iteration (see the note above the macro).
#define OP_CMPXCHG_WR(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_CPU_PAUSE(); \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs; \
} \
}
// -------------------------------------------------------------------------
// CAS-based write entry point (used where plain exchange is unavailable).
#define ATOMIC_CMPXCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
OP_CMPXCHG_WR(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_WR(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL(OP,LCK_ID) /* send assignment */ \
}
// -------------------------------------------------------------------------
// Generate the __kmpc_atomic_*_wr write entry points.  On 32-bit x86 the
// 64-bit types use the CAS loop (no native 64-bit exchange); elsewhere a
// single exchange instruction suffices.
ATOMIC_XCHG_WR( fixed1, wr, kmp_int8, 8, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_wr
ATOMIC_XCHG_WR( fixed2, wr, kmp_int16, 16, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_wr
ATOMIC_XCHG_WR( fixed4, wr, kmp_int32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CMPXCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr
#else
ATOMIC_XCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr
#endif
ATOMIC_XCHG_FLOAT_WR( float4, wr, kmp_real32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_float4_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CMPXCHG_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr
#else
ATOMIC_XCHG_FLOAT_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr
#endif
ATOMIC_CRITICAL_WR( float10, wr, long double, =, 10r, 1 ) // __kmpc_atomic_float10_wr
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_WR( float16, wr, QUAD_LEGACY, =, 16r, 1 ) // __kmpc_atomic_float16_wr
#endif
ATOMIC_CRITICAL_WR( cmplx4, wr, kmp_cmplx32, =, 8c, 1 ) // __kmpc_atomic_cmplx4_wr
ATOMIC_CRITICAL_WR( cmplx8, wr, kmp_cmplx64, =, 16c, 1 ) // __kmpc_atomic_cmplx8_wr
ATOMIC_CRITICAL_WR( cmplx10, wr, kmp_cmplx80, =, 20c, 1 ) // __kmpc_atomic_cmplx10_wr
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_WR( cmplx16, wr, CPLX128_LEG, =, 32c, 1 ) // __kmpc_atomic_cmplx16_wr
#if ( KMP_ARCH_X86 )
// 16-byte-aligned _Quad / complex-128 variants (IA-32 only).
ATOMIC_CRITICAL_WR( float16, a16_wr, Quad_a16_t, =, 16r, 1 ) // __kmpc_atomic_float16_a16_wr
ATOMIC_CRITICAL_WR( cmplx16, a16_wr, kmp_cmplx128_a16_t, =, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_wr
#endif
#endif
// ------------------------------------------------------------------------
// Atomic CAPTURE routines
// ------------------------------------------------------------------------
// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
// Emits the signature and opening brace of a capture entry point:
//   RET_TYPE __kmpc_atomic_<TYPE_ID>_<OP_ID>( ident_t*, int gtid, TYPE *lhs, TYPE rhs, int flag )
// `flag` selects which value is captured: nonzero -> value AFTER the update
// (v = x = x op expr), zero -> value BEFORE the update (v = x; x = x op expr).
#define ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ) \
{ \
KMP_DEBUG_ASSERT( __kmp_init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// Lock-based capture: apply `(*lhs) OP rhs` under lock LCK_ID and capture
// either the updated value (flag != 0) or the prior value (flag == 0) in
// `new_value` (declared by the enclosing macro), then return it.
#define OP_CRITICAL_CPT(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
(*lhs) OP rhs; \
new_value = (*lhs); \
} else { \
new_value = (*lhs); \
(*lhs) OP rhs; \
} \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return new_value;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compatibility capture: OP##= turns the plain operator into its
// compound-assignment form before delegating to OP_CRITICAL_CPT (which
// returns, so no fall-through to the native path).
#define OP_GOMP_CRITICAL_CPT(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT( OP##=, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
// CAS-loop capture: compute new_value = old_value OP rhs, retry (with
// KMP_CPU_PAUSE) until the CAS succeeds, then return new_value or old_value
// according to `flag`.  temp_val forces a single read of *lhs per iteration.
#define OP_CMPXCHG_CPT(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = old_value OP rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_CPU_PAUSE(); \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = old_value OP rhs; \
} \
if( flag ) { \
return new_value; \
} else \
return old_value; \
}
// -------------------------------------------------------------------------
// CAS-based capture entry point.
#define ATOMIC_CMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \
OP_CMPXCHG_CPT(TYPE,BITS,OP) \
}
// -------------------------------------------------------------------------
// Fetch-and-add capture for integer add/sub.  The primitive returns the old
// value; the captured "after" value is reconstructed as old_value OP rhs.
#define ATOMIC_FIXED_ADD_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE old_value, new_value; \
OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \
/* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
old_value = KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \
if( flag ) { \
return old_value OP rhs; \
} else \
return old_value; \
}
// -------------------------------------------------------------------------
// 32/64-bit integer add/sub capture via fetch-and-add; float add/sub via CAS.
ATOMIC_FIXED_ADD_CPT( fixed4, add_cpt, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_add_cpt
ATOMIC_FIXED_ADD_CPT( fixed4, sub_cpt, kmp_int32, 32, -, 0 ) // __kmpc_atomic_fixed4_sub_cpt
ATOMIC_FIXED_ADD_CPT( fixed8, add_cpt, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_cpt
ATOMIC_FIXED_ADD_CPT( fixed8, sub_cpt, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt
ATOMIC_CMPXCHG_CPT( float4, add_cpt, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_cpt
ATOMIC_CMPXCHG_CPT( float4, sub_cpt, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt
ATOMIC_CMPXCHG_CPT( float8, add_cpt, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_cpt
ATOMIC_CMPXCHG_CPT( float8, sub_cpt, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt
// ------------------------------------------------------------------------
// Entries definition for integer operands
// TYPE_ID - operands type and size (fixed4, float4)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operand type
// BITS - size in bits, used to distinguish low level calls
// OP - operator (used in critical section)
// TYPE_ID,OP_ID, TYPE, BITS,OP,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// ------------------------------------------------------------------------
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
// CAS-based capture entry points for the remaining integer operators
// (add/sub/mul/div, bitwise and/or/xor, shifts; signed and unsigned where
// the operator's semantics differ) and for float mul/div.
ATOMIC_CMPXCHG_CPT( fixed1, add_cpt, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt
ATOMIC_CMPXCHG_CPT( fixed1, andb_cpt, kmp_int8, 8, &, 0 ) // __kmpc_atomic_fixed1_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed1, div_cpt, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt
ATOMIC_CMPXCHG_CPT( fixed1u, div_cpt, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed1, mul_cpt, kmp_int8, 8, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed1, orb_cpt, kmp_int8, 8, |, 0 ) // __kmpc_atomic_fixed1_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed1, shl_cpt, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed1, shr_cpt, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed1u, shr_cpt, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed1, sub_cpt, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt
ATOMIC_CMPXCHG_CPT( fixed1, xor_cpt, kmp_int8, 8, ^, 0 ) // __kmpc_atomic_fixed1_xor_cpt
ATOMIC_CMPXCHG_CPT( fixed2, add_cpt, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt
ATOMIC_CMPXCHG_CPT( fixed2, andb_cpt, kmp_int16, 16, &, 0 ) // __kmpc_atomic_fixed2_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed2, div_cpt, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt
ATOMIC_CMPXCHG_CPT( fixed2u, div_cpt, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed2, mul_cpt, kmp_int16, 16, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed2, orb_cpt, kmp_int16, 16, |, 0 ) // __kmpc_atomic_fixed2_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed2, shl_cpt, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed2, shr_cpt, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed2u, shr_cpt, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed2, sub_cpt, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt
ATOMIC_CMPXCHG_CPT( fixed2, xor_cpt, kmp_int16, 16, ^, 0 ) // __kmpc_atomic_fixed2_xor_cpt
ATOMIC_CMPXCHG_CPT( fixed4, andb_cpt, kmp_int32, 32, &, 0 ) // __kmpc_atomic_fixed4_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed4, div_cpt, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt
ATOMIC_CMPXCHG_CPT( fixed4u, div_cpt, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed4, mul_cpt, kmp_int32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed4, orb_cpt, kmp_int32, 32, |, 0 ) // __kmpc_atomic_fixed4_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed4, shl_cpt, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed4, shr_cpt, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed4u, shr_cpt, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed4, xor_cpt, kmp_int32, 32, ^, 0 ) // __kmpc_atomic_fixed4_xor_cpt
ATOMIC_CMPXCHG_CPT( fixed8, andb_cpt, kmp_int64, 64, &, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed8, div_cpt, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt
ATOMIC_CMPXCHG_CPT( fixed8u, div_cpt, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed8, mul_cpt, kmp_int64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed8, orb_cpt, kmp_int64, 64, |, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed8, shl_cpt, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed8, shr_cpt, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed8u, shr_cpt, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed8, xor_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor_cpt
ATOMIC_CMPXCHG_CPT( float4, div_cpt, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt
ATOMIC_CMPXCHG_CPT( float4, mul_cpt, kmp_real32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_cpt
ATOMIC_CMPXCHG_CPT( float8, div_cpt, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt
ATOMIC_CMPXCHG_CPT( float8, mul_cpt, kmp_real64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for C/C++ Reduction operators && and ||
// ------------------------------------------------------------------------
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// Lock-based capture for logical && / ||.  There is no compound assignment
// for these operators, so the caller passes OP as "= *lhs &&" (or "||"),
// making `new_value OP rhs` expand to `new_value = *lhs && rhs`.
// Note: unlike OP_CRITICAL_CPT this does not store back into *lhs here;
// it only computes the captured value under the lock.
#define OP_CRITICAL_L_CPT(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
new_value OP rhs; \
} else \
new_value = (*lhs); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compatibility path for logical capture; returns early when taken.
#define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_L_CPT( OP, 0 ); \
return new_value; \
}
#else
#define OP_GOMP_CRITICAL_L_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Need separate macros for &&, || because there is no combined assignment
#define ATOMIC_CMPX_L_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_L_CPT( = *lhs OP, GOMP_FLAG ) \
OP_CMPXCHG_CPT(TYPE,BITS,OP) \
}
// Logical and/or capture entry points.
ATOMIC_CMPX_L_CPT( fixed1, andl_cpt, char, 8, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl_cpt
ATOMIC_CMPX_L_CPT( fixed1, orl_cpt, char, 8, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl_cpt
ATOMIC_CMPX_L_CPT( fixed2, andl_cpt, short, 16, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl_cpt
ATOMIC_CMPX_L_CPT( fixed2, orl_cpt, short, 16, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl_cpt
ATOMIC_CMPX_L_CPT( fixed4, andl_cpt, kmp_int32, 32, &&, 0 ) // __kmpc_atomic_fixed4_andl_cpt
ATOMIC_CMPX_L_CPT( fixed4, orl_cpt, kmp_int32, 32, ||, 0 ) // __kmpc_atomic_fixed4_orl_cpt
ATOMIC_CMPX_L_CPT( fixed8, andl_cpt, kmp_int64, 64, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl_cpt
ATOMIC_CMPX_L_CPT( fixed8, orl_cpt, kmp_int64, 64, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl_cpt
// -------------------------------------------------------------------------
// Routines for Fortran operators that matched no one in C:
// MAX, MIN, .EQV., .NEQV.
// Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl}_cpt
// Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor}_cpt
// -------------------------------------------------------------------------
// -------------------------------------------------------------------------
// MIN and MAX need separate macros
// OP - operator to check if we need any actions?
// Lock-based MIN/MAX capture.  OP is the comparison (`<` for max, `>` for
// min): `*lhs OP rhs` re-tests under the lock whether rhs still replaces the
// stored value; if so, swap it in and capture rhs (flag) or the old value.
// Note: new_value is returned without initialization if the re-test fails —
// callers only enter this after their own `*lhs OP rhs` check.
#define MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if ( *lhs OP rhs ) { /* still need actions? */ \
old_value = *lhs; \
*lhs = rhs; \
if ( flag ) \
new_value = rhs; \
else \
new_value = old_value; \
} \
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return new_value; \
// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compatibility MIN/MAX capture under lock 0 (returns when taken).
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG) \
if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \
KMP_CHECK_GTID; \
MIN_MAX_CRITSECT_CPT( OP, 0 ); \
}
#else
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// -------------------------------------------------------------------------
// CAS-loop MIN/MAX capture: keep trying to store rhs while the comparison
// still says it should replace *lhs; stop as soon as another thread stores
// an even better value (comparison fails) or our CAS succeeds.
#define MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
/*TYPE old_value; */ \
temp_val = *lhs; \
old_value = temp_val; \
while ( old_value OP rhs && /* still need actions? */ \
! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \
{ \
KMP_CPU_PAUSE(); \
temp_val = *lhs; \
old_value = temp_val; \
} \
if( flag ) \
return rhs; \
else \
return old_value; \
}
// -------------------------------------------------------------------------
// 1-byte, 2-byte operands - use critical section
// The cheap unsynchronized pre-check `*lhs OP rhs` skips all locking when
// rhs clearly cannot win; in that case the current *lhs is returned.
#define MIN_MAX_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value, old_value; \
if ( *lhs OP rhs ) { /* need actions? */ \
GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \
MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \
} \
return *lhs; \
}
// CAS-based variant with the same unsynchronized pre-check.
#define MIN_MAX_COMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value, old_value; \
if ( *lhs OP rhs ) { \
GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \
MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \
} \
return *lhs; \
}
// MIN/MAX capture entry points.  OP '<' implements max (replace when the
// stored value is less), OP '>' implements min.
MIN_MAX_COMPXCHG_CPT( fixed1, max_cpt, char, 8, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed1, min_cpt, char, 8, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed2, max_cpt, short, 16, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed2, min_cpt, short, 16, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed4, max_cpt, kmp_int32, 32, <, 0 ) // __kmpc_atomic_fixed4_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed4, min_cpt, kmp_int32, 32, >, 0 ) // __kmpc_atomic_fixed4_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed8, max_cpt, kmp_int64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed8, min_cpt, kmp_int64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min_cpt
MIN_MAX_COMPXCHG_CPT( float4, max_cpt, kmp_real32, 32, <, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max_cpt
MIN_MAX_COMPXCHG_CPT( float4, min_cpt, kmp_real32, 32, >, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min_cpt
MIN_MAX_COMPXCHG_CPT( float8, max_cpt, kmp_real64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max_cpt
MIN_MAX_COMPXCHG_CPT( float8, min_cpt, kmp_real64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min_cpt
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL_CPT( float16, max_cpt, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max_cpt
MIN_MAX_CRITICAL_CPT( float16, min_cpt, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min_cpt
#if ( KMP_ARCH_X86 )
MIN_MAX_CRITICAL_CPT( float16, max_a16_cpt, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16_cpt
MIN_MAX_CRITICAL_CPT( float16, min_a16_cpt, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16_cpt
#endif
#endif
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compatibility path for .EQV. capture; the caller passes the complete
// compound form (e.g. ^=~) as OP, so no ##= pasting is done here.
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT( OP, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Fortran .EQV. capture: equivalence is xor-with-complement (x ^ ~rhs),
// hence the dedicated macro; .NEQV. is plain xor and reuses ATOMIC_CMPXCHG_CPT.
#define ATOMIC_CMPX_EQV_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_EQV_CPT(^=~,GOMP_FLAG) /* send assignment */ \
OP_CMPXCHG_CPT(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
ATOMIC_CMPXCHG_CPT( fixed1, neqv_cpt, kmp_int8, 8, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed2, neqv_cpt, kmp_int16, 16, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed4, neqv_cpt, kmp_int32, 32, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed8, neqv_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed1, eqv_cpt, kmp_int8, 8, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed2, eqv_cpt, kmp_int16, 16, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed4, eqv_cpt, kmp_int32, 32, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed8, eqv_cpt, kmp_int64, 64, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv_cpt
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// Lock-based capture for extended types (long double, _Quad, complex).
#define ATOMIC_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \
}
// ------------------------------------------------------------------------
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
// Lock-based capture that stores the captured value through `out` instead of
// returning it (the generated function returns void).
#define OP_CRITICAL_CPT_WRK(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
(*lhs) OP rhs; \
(*out) = (*lhs); \
} else { \
(*out) = (*lhs); \
(*lhs) OP rhs; \
} \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compatibility variant of the workaround path (returns when taken).
#define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT_WRK( OP##=, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// void __kmpc_atomic_<TYPE_ID>_<OP_ID>( ident_t*, int gtid, TYPE *lhs, TYPE rhs, TYPE *out, int flag )
#define ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out, int flag ) \
{ \
KMP_DEBUG_ASSERT( __kmp_init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
OP_GOMP_CRITICAL_CPT_WRK(OP,GOMP_FLAG) \
OP_CRITICAL_CPT_WRK(OP##=,LCK_ID) \
}
// The end of workaround for cmplx4
/* ------------------------------------------------------------------------- */
// routines for long double type
// Capture entry points for extended types (all lock-based).
ATOMIC_CRITICAL_CPT( float10, add_cpt, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add_cpt
ATOMIC_CRITICAL_CPT( float10, sub_cpt, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt
ATOMIC_CRITICAL_CPT( float10, mul_cpt, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul_cpt
ATOMIC_CRITICAL_CPT( float10, div_cpt, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_CPT( float16, add_cpt, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add_cpt
ATOMIC_CRITICAL_CPT( float16, sub_cpt, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt
ATOMIC_CRITICAL_CPT( float16, mul_cpt, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul_cpt
ATOMIC_CRITICAL_CPT( float16, div_cpt, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt
#if ( KMP_ARCH_X86 )
// 16-byte-aligned _Quad variants (IA-32 only).
ATOMIC_CRITICAL_CPT( float16, add_a16_cpt, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16_cpt
ATOMIC_CRITICAL_CPT( float16, sub_a16_cpt, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt
ATOMIC_CRITICAL_CPT( float16, mul_a16_cpt, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16_cpt
ATOMIC_CRITICAL_CPT( float16, div_a16_cpt, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt
#endif
#endif
// routines for complex types
// cmplx4 routines to return void (captured value goes through the `out`
// parameter — see the Win_32e workaround above).
ATOMIC_CRITICAL_CPT_WRK( cmplx4, add_cpt, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, sub_cpt, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, mul_cpt, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, div_cpt, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt
ATOMIC_CRITICAL_CPT( cmplx8, add_cpt, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add_cpt
ATOMIC_CRITICAL_CPT( cmplx8, sub_cpt, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx8, mul_cpt, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx8, div_cpt, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt
ATOMIC_CRITICAL_CPT( cmplx10, add_cpt, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add_cpt
ATOMIC_CRITICAL_CPT( cmplx10, sub_cpt, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx10, mul_cpt, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx10, div_cpt, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_CPT( cmplx16, add_cpt, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_cpt
ATOMIC_CRITICAL_CPT( cmplx16, sub_cpt, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx16, mul_cpt, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx16, div_cpt, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_CPT( cmplx16, add_a16_cpt, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16_cpt
ATOMIC_CRITICAL_CPT( cmplx16, sub_a16_cpt, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt
ATOMIC_CRITICAL_CPT( cmplx16, mul_a16_cpt, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16_cpt
ATOMIC_CRITICAL_CPT( cmplx16, div_a16_cpt, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt
#endif
#endif
#if OMP_40_ENABLED
// OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr binop x; v = x; } for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// "flag" (in scope at the expansion site) selects which value is captured:
//   flag != 0 -> capture the value AFTER the update (v = x = rhs OP x)
//   flag == 0 -> capture the value BEFORE the update (v = x; x = rhs OP x)
#define OP_CRITICAL_CPT_REV(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
 \
    if( flag ) { \
        /*temp_val = (*lhs);*/\
        (*lhs) = (rhs) OP (*lhs); \
        new_value = (*lhs); \
    } else { \
        new_value = (*lhs);\
        (*lhs) = (rhs) OP (*lhs); \
    } \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return new_value;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compatibility mode (__kmp_atomic_mode == 2) funnels every atomic
// through lock 0, so the critical-section path is forced regardless of type.
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_CPT_REV( OP, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
#define OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value, new_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = rhs OP old_value; /* reversed operand order vs. plain _cpt */ \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_CPU_PAUSE(); \
 \
            temp_val = *lhs; \
            old_value = temp_val; \
            new_value = rhs OP old_value; \
        } \
        if( flag ) { \
            return new_value; /* capture the updated value */ \
        } else \
            return old_value; /* capture the pre-update value */ \
    }
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_CPT_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    TYPE KMP_ATOMIC_VOLATILE temp_val; /* NOTE(review): appears unused here -- OP_CMPXCHG_CPT_REV declares its own temp_val and OP_CRITICAL_CPT_REV's use is commented out; confirm before removing */ \
    OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \
    OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
}
// Lock-free captured-reverse routines for 1/2/4/8-byte integers and 4/8-byte
// reals.  Only the non-commutative operators (-, /, <<, >>) need _rev
// flavours; the GOMP_FLAG of KMP_ARCH_X86 forces the locked path on IA-32
// under GOMP compatibility.
ATOMIC_CMPXCHG_CPT_REV( fixed1, div_cpt_rev, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, div_cpt_rev, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, shl_cpt_rev, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, shr_cpt_rev, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, shr_cpt_rev, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, sub_cpt_rev, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, div_cpt_rev, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, div_cpt_rev, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, shl_cpt_rev, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, shr_cpt_rev, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, shr_cpt_rev, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, sub_cpt_rev, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, div_cpt_rev, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4u, div_cpt_rev, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, shl_cpt_rev, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, shr_cpt_rev, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4u, shr_cpt_rev, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, sub_cpt_rev, kmp_int32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, div_cpt_rev, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8u, div_cpt_rev, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, shl_cpt_rev, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, shr_cpt_rev, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8u, shr_cpt_rev, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, sub_cpt_rev, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float4, div_cpt_rev, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float4, sub_cpt_rev, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float8, div_cpt_rev, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float8, sub_cpt_rev, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_CPT_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    TYPE KMP_ATOMIC_VOLATILE temp_val; \
    /*printf("__kmp_atomic_mode = %d\n", __kmp_atomic_mode);*/\
    OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \
    OP_CRITICAL_CPT_REV(OP,LCK_ID) \
}
/* ------------------------------------------------------------------------- */
// routines for long double type
// (only the non-commutative operators, - and /, need reverse flavours)
ATOMIC_CRITICAL_CPT_REV( float10, sub_cpt_rev, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float10, div_cpt_rev, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_CPT_REV( float16, sub_cpt_rev, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float16, div_cpt_rev, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt_rev
#if ( KMP_ARCH_X86 )
// 16-byte-aligned _Quad flavours (Quad_a16_t)
ATOMIC_CRITICAL_CPT_REV( float16, sub_a16_cpt_rev, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float16, div_a16_cpt_rev, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt_rev
#endif
#endif
// routines for complex types
// ------------------------------------------------------------------------
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
#define OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
 \
    if( flag ) { \
        /* capture the value AFTER the update */ \
        (*lhs) = (rhs) OP (*lhs); \
        (*out) = (*lhs); \
    } else { \
        /* capture the value BEFORE the update */ \
        (*out) = (*lhs); \
        (*lhs) = (rhs) OP (*lhs); \
    } \
 \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_CPT_REV_WRK( OP, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_REV_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
    OP_GOMP_CRITICAL_CPT_REV_WRK(OP,GOMP_FLAG) \
    OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \
}
// The end of workaround for cmplx4
// !!! TODO: check if we need to return void for cmplx4 routines
// cmplx4 routines to return void
ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, sub_cpt_rev, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, div_cpt_rev, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx8, sub_cpt_rev, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx8, div_cpt_rev, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx10, sub_cpt_rev, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx10, div_cpt_rev, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt_rev
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_cpt_rev, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx16, div_cpt_rev, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt_rev
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_a16_cpt_rev, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx16, div_a16_cpt_rev, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt_rev
#endif
#endif
// OpenMP 4.0 Capture-write (swap): {v = x; x = expr;}
// Prologue shared by all swap routines; the old value is returned to the
// caller (except for the cmplx4 workaround further below).
#define ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid ));
// Swap under a per-size critical section (for types with no native exchange).
#define CRITICAL_SWP(LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
 \
    old_value = (*lhs); \
    (*lhs) = rhs; \
 \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return old_value;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_CRITICAL_SWP(FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        CRITICAL_SWP( 0 ); \
    }
#else
#define GOMP_CRITICAL_SWP(FLAG)
#endif /* KMP_GOMP_COMPAT */
// Swap via the fixed-size exchange primitive.
#define ATOMIC_XCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
    TYPE old_value; \
    GOMP_CRITICAL_SWP(GOMP_FLAG) \
    old_value = KMP_XCHG_FIXED##BITS( lhs, rhs ); \
    return old_value; \
}
// ------------------------------------------------------------------------
// Same, but using the floating-point exchange primitive.
#define ATOMIC_XCHG_FLOAT_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
    TYPE old_value; \
    GOMP_CRITICAL_SWP(GOMP_FLAG) \
    old_value = KMP_XCHG_REAL##BITS( lhs, rhs ); \
    return old_value; \
}
// ------------------------------------------------------------------------
// Swap via a compare-and-store retry loop.
#define CMPXCHG_SWP(TYPE,BITS) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value, new_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = rhs; \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_CPU_PAUSE(); \
 \
            temp_val = *lhs; \
            old_value = temp_val; \
            new_value = rhs; \
        } \
        return old_value; \
    }
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
    TYPE old_value; \
    GOMP_CRITICAL_SWP(GOMP_FLAG) \
    CMPXCHG_SWP(TYPE,BITS) \
}
ATOMIC_XCHG_SWP( fixed1, kmp_int8, 8, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_swp
ATOMIC_XCHG_SWP( fixed2, kmp_int16, 16, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_swp
ATOMIC_XCHG_SWP( fixed4, kmp_int32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_swp
ATOMIC_XCHG_FLOAT_SWP( float4, kmp_real32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_float4_swp
#if ( KMP_ARCH_X86 )
// 32-bit x86: 8-byte data uses the compare-and-store loop instead of a
// direct 64-bit exchange.
ATOMIC_CMPXCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp
ATOMIC_CMPXCHG_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp
#else
ATOMIC_XCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp
ATOMIC_XCHG_FLOAT_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp
#endif
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
#define ATOMIC_CRITICAL_SWP(TYPE_ID,TYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
    TYPE old_value; \
    GOMP_CRITICAL_SWP(GOMP_FLAG) \
    CRITICAL_SWP(LCK_ID) \
}
// ------------------------------------------------------------------------
// !!! TODO: check if we need to return void for cmplx4 routines
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
#define ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid ));
#define CRITICAL_SWP_WRK(LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
 \
    tmp = (*lhs); \
    (*lhs) = (rhs); \
    (*out) = tmp; /* old value is delivered through "out", not returned */ \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_CRITICAL_SWP_WRK(FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        CRITICAL_SWP_WRK( 0 ); \
    }
#else
#define GOMP_CRITICAL_SWP_WRK(FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_SWP_WRK(TYPE_ID, TYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \
    TYPE tmp; \
    GOMP_CRITICAL_SWP_WRK(GOMP_FLAG) \
    CRITICAL_SWP_WRK(LCK_ID) \
}
// The end of workaround for cmplx4
ATOMIC_CRITICAL_SWP( float10, long double, 10r, 1 ) // __kmpc_atomic_float10_swp
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_SWP( float16, QUAD_LEGACY, 16r, 1 ) // __kmpc_atomic_float16_swp
#endif
// cmplx4 routine to return void
ATOMIC_CRITICAL_SWP_WRK( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp
//ATOMIC_CRITICAL_SWP( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp
ATOMIC_CRITICAL_SWP( cmplx8, kmp_cmplx64, 16c, 1 ) // __kmpc_atomic_cmplx8_swp
ATOMIC_CRITICAL_SWP( cmplx10, kmp_cmplx80, 20c, 1 ) // __kmpc_atomic_cmplx10_swp
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_SWP( cmplx16, CPLX128_LEG, 32c, 1 ) // __kmpc_atomic_cmplx16_swp
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_SWP( float16_a16, Quad_a16_t, 16r, 1 ) // __kmpc_atomic_float16_a16_swp
ATOMIC_CRITICAL_SWP( cmplx16_a16, kmp_cmplx128_a16_t, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_swp
#endif
#endif
// End of OpenMP 4.0 Capture
#endif //OMP_40_ENABLED
#endif //KMP_ARCH_X86 || KMP_ARCH_X86_64
#undef OP_CRITICAL
/* ------------------------------------------------------------------------ */
/* Generic atomic routines */
/* ------------------------------------------------------------------------ */
/*
 * Generic atomic update of a 1-byte location.  The callback "f" computes
 * the operation, writing its result through its first argument:
 * f(&result, &old_value, rhs).
 * Fast path: compare-and-store retry loop on the byte at lhs.
 * Under GOMP compatibility on IA-32 the locked path is forced instead.
 */
void
__kmpc_atomic_1( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
        FALSE /* must use lock */
#else
        TRUE
#endif
        )
    {
        kmp_int8 old_value, new_value;
        old_value = *(kmp_int8 *) lhs;
        (*f)( &new_value, &old_value, rhs );
        /* TODO: Should this be acquire or release? */
        while ( ! KMP_COMPARE_AND_STORE_ACQ8 ( (kmp_int8 *) lhs,
                        *(kmp_int8 *) &old_value, *(kmp_int8 *) &new_value ) )
        {
            KMP_CPU_PAUSE();
            /* another thread changed *lhs: reload and recompute */
            old_value = *(kmp_int8 *) lhs;
            (*f)( &new_value, &old_value, rhs );
        }
        return;
    }
    else {
        //
        // All 1-byte data is of integer data type.
        //
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            /* GOMP-compatibility mode: use the single shared atomic lock */
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock_1i, gtid );
        (*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
            __kmp_release_atomic_lock( & __kmp_atomic_lock_1i, gtid );
    }
}
/*
 * Generic atomic update of a 2-byte location.  The callback "f" computes
 * the operation, writing its result through its first argument:
 * f(&result, &old_value, rhs).
 * Fast path: compare-and-store retry loop, taken when a 2-byte CAS is
 * usable and the address is suitably aligned; otherwise a lock is used.
 *
 * Fix: added the KMP_DEBUG_ASSERT( __kmp_init_serial ) check for
 * consistency with the sibling generic routines (__kmpc_atomic_1/_4/_8/...),
 * all of which assert serial initialization on entry.
 */
void
__kmpc_atomic_2( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
        FALSE /* must use lock */
#elif KMP_ARCH_X86 || KMP_ARCH_X86_64
        TRUE /* no alignment problems */
#else
        ! ( (kmp_uintptr_t) lhs & 0x1) /* make sure address is 2-byte aligned */
#endif
        )
    {
        kmp_int16 old_value, new_value;
        old_value = *(kmp_int16 *) lhs;
        (*f)( &new_value, &old_value, rhs );
        /* TODO: Should this be acquire or release? */
        while ( ! KMP_COMPARE_AND_STORE_ACQ16 ( (kmp_int16 *) lhs,
                        *(kmp_int16 *) &old_value, *(kmp_int16 *) &new_value ) )
        {
            KMP_CPU_PAUSE();
            /* another thread changed *lhs: reload and recompute */
            old_value = *(kmp_int16 *) lhs;
            (*f)( &new_value, &old_value, rhs );
        }
        return;
    }
    else {
        //
        // All 2-byte data is of integer data type.
        //
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            /* GOMP-compatibility mode: use the single shared atomic lock */
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock_2i, gtid );
        (*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
            __kmp_release_atomic_lock( & __kmp_atomic_lock_2i, gtid );
    }
}
/*
 * Generic atomic update of a 4-byte location; f(&result, &old_value, rhs)
 * computes the operation.  Uses a 4-byte compare-and-store retry loop when
 * the address is suitably aligned, otherwise falls back to a lock.
 */
void
__kmpc_atomic_4( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    if (
        //
        // FIXME: On IA-32 architecture, gcc uses cmpxchg only for 4-byte ints.
        // Gomp compatibility is broken if this routine is called for floats.
        //
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
        TRUE /* no alignment problems */
#else
        ! ( (kmp_uintptr_t) lhs & 0x3) /* make sure address is 4-byte aligned */
#endif
        )
    {
        kmp_int32 old_value, new_value;
        old_value = *(kmp_int32 *) lhs;
        (*f)( &new_value, &old_value, rhs );
        /* TODO: Should this be acquire or release? */
        while ( ! KMP_COMPARE_AND_STORE_ACQ32 ( (kmp_int32 *) lhs,
                        *(kmp_int32 *) &old_value, *(kmp_int32 *) &new_value ) )
        {
            KMP_CPU_PAUSE();
            /* another thread changed *lhs: reload and recompute */
            old_value = *(kmp_int32 *) lhs;
            (*f)( &new_value, &old_value, rhs );
        }
        return;
    }
    else {
        //
        // Use __kmp_atomic_lock_4i for all 4-byte data,
        // even if it isn't of integer data type.
        //
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            /* GOMP-compatibility mode: use the single shared atomic lock */
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock_4i, gtid );
        (*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
            __kmp_release_atomic_lock( & __kmp_atomic_lock_4i, gtid );
    }
}
/*
 * Generic atomic update of an 8-byte location; f(&result, &old_value, rhs)
 * computes the operation.  Uses an 8-byte compare-and-store retry loop when
 * legal and aligned; under GOMP compatibility on IA-32, or when unaligned,
 * a lock is used instead.
 */
void
__kmpc_atomic_8( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
        FALSE /* must use lock */
#elif KMP_ARCH_X86 || KMP_ARCH_X86_64
        TRUE /* no alignment problems */
#else
        ! ( (kmp_uintptr_t) lhs & 0x7) /* make sure address is 8-byte aligned */
#endif
        )
    {
        kmp_int64 old_value, new_value;
        old_value = *(kmp_int64 *) lhs;
        (*f)( &new_value, &old_value, rhs );
        /* TODO: Should this be acquire or release? */
        while ( ! KMP_COMPARE_AND_STORE_ACQ64 ( (kmp_int64 *) lhs,
                        *(kmp_int64 *) &old_value,
                        *(kmp_int64 *) &new_value ) )
        {
            KMP_CPU_PAUSE();
            /* another thread changed *lhs: reload and recompute */
            old_value = *(kmp_int64 *) lhs;
            (*f)( &new_value, &old_value, rhs );
        }
        return;
    } else {
        //
        // Use __kmp_atomic_lock_8i for all 8-byte data,
        // even if it isn't of integer data type.
        //
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            /* GOMP-compatibility mode: use the single shared atomic lock */
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock_8i, gtid );
        (*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
            __kmp_release_atomic_lock( & __kmp_atomic_lock_8i, gtid );
    }
}
/*
 * Generic atomic update of a 10-byte location (e.g. x87 long double);
 * always lock-based, using __kmp_atomic_lock_10r (or the shared global
 * lock under GOMP-compatibility mode).  f(lhs, lhs, rhs) performs the
 * update in place.
 */
void
__kmpc_atomic_10( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_10r, gtid );
    (*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_10r, gtid );
}
/*
 * Generic atomic update of a 16-byte location; always lock-based.
 * NOTE(review): this uses __kmp_atomic_lock_16c (the "16c" complex lock)
 * for all 16-byte payloads, while the macro-generated float16 critical
 * routines use the 16r lock -- confirm the two never guard the same
 * location before relying on mutual exclusion between them.
 */
void
__kmpc_atomic_16( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_16c, gtid );
    (*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_16c, gtid );
}
/*
 * Generic atomic update of a 20-byte location (e.g. 80-bit complex);
 * always lock-based, using __kmp_atomic_lock_20c (or the shared global
 * lock under GOMP-compatibility mode).
 */
void
__kmpc_atomic_20( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_20c, gtid );
    (*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_20c, gtid );
}
/*
 * Generic atomic update of a 32-byte location (e.g. quad complex);
 * always lock-based, using __kmp_atomic_lock_32c (or the shared global
 * lock under GOMP-compatibility mode).
 */
void
__kmpc_atomic_32( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_32c, gtid );
    (*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_32c, gtid );
}
// AC: same two routines as GOMP_atomic_start/end, but will be called by our compiler
// duplicated in order to not use 3-party names in pure Intel code
// TODO: consider adding GTID parameter after consultation with Ernesto/Xinmin.
/*
 * Enter the global atomic critical section (counterpart of
 * __kmpc_atomic_end).  Mirrors GOMP_atomic_start but is emitted by the
 * Intel compiler so that pure Intel code avoids third-party symbol names.
 */
void
__kmpc_atomic_start(void)
{
    const int global_tid = __kmp_entry_gtid();
    KA_TRACE(20, ("__kmpc_atomic_start: T#%d\n", global_tid));
    __kmp_acquire_atomic_lock(&__kmp_atomic_lock, global_tid);
}
/*
 * Leave the global atomic critical section entered by __kmpc_atomic_start.
 */
void
__kmpc_atomic_end(void)
{
    const int global_tid = __kmp_get_gtid();
    KA_TRACE(20, ("__kmpc_atomic_end: T#%d\n", global_tid));
    __kmp_release_atomic_lock(&__kmp_atomic_lock, global_tid);
}
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */
/*!
@}
*/
// end of file
|
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/property.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
struct _ThresholdMap
{
  char
    *map_id,       /* "map" attribute of the <threshold> element */
    *description;  /* text of the <description> element */

  size_t
    width,         /* "width" attribute of <levels>: pattern columns */
    height;        /* "height" attribute of <levels>: pattern rows */

  ssize_t
    divisor,       /* "divisor" attribute of <levels> */
    *levels;       /* parsed <levels> entries (width*height values) */
};
/*
  Static declarations.
*/
/*
  Built-in threshold maps: a 1x1 non-dithering threshold and a 2x2
  checkerboard dither.  Presumably the fallback used when the external
  thresholds.xml resource cannot be loaded -- confirm in GetThresholdMapFile.
  NOTE(review): the "checks" map is described/aliased as "2x1" but its
  <levels> block is 2x2; this matches the historical upstream text.
*/
static const char
  *MinimalThresholdMap =
    "<?xml version=\"1.0\"?>"
    "<thresholds>"
    "  <threshold map=\"threshold\" alias=\"1x1\">"
    "    <description>Threshold 1x1 (non-dither)</description>"
    "    <levels width=\"1\" height=\"1\" divisor=\"2\">"
    "        1"
    "    </levels>"
    "  </threshold>"
    "  <threshold map=\"checks\" alias=\"2x1\">"
    "    <description>Checkerboard 2x1 (dither)</description>"
    "    <levels width=\"2\" height=\"2\" divisor=\"3\">"
    "       1 2"
    "       2 1"
    "    </levels>"
    "  </threshold>"
    "</thresholds>";
/*
Forward declarations.
*/
static ThresholdMap
*GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,const size_t width,
% const size_t height,const double bias,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o bias: the mean bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const double bias,
  ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag "AdaptiveThreshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickSizeType
    number_pixels;  /* pixels per local window: width*height */

  ssize_t
    y;

  /*
    Initialize threshold image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  threshold_image=CloneImage(image,0,0,MagickTrue,exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(threshold_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Threshold image.  Each output pixel is black (0) when the source pixel is
    <= the mean of its width x height neighborhood plus "bias", else white.
    The neighborhood mean is maintained incrementally per row (see below).
  */
  status=MagickTrue;
  progress=0;
  number_pixels=(MagickSizeType) width*height;
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_bias[MaxPixelChannels],  /* column sum to drop when sliding */
      channel_sum[MaxPixelChannels];   /* running window sum per channel */

    register const Quantum
      *magick_restrict p,
      *magick_restrict pixels;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      center,
      u,
      v;

    if (status == MagickFalse)
      continue;
    /*
      Read a (columns+width) x height band centered on row y; the view's
      virtual pixels supply values outside the image bounds.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (height/2L),image->columns+width,height,exception);
    q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Offset (in Quantum units) of the current pixel within the band.
    */
    center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
      GetPixelChannels(image)*(width/2);
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      /*
        Prime the window sum for the first column position; channel_bias
        records the window's rightmost column so the sliding step below can
        start from a consistent state.
      */
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
        channel);
      if ((traits == UndefinedPixelTrait) ||
          (threshold_traits == UndefinedPixelTrait))
        continue;
      if ((threshold_traits & CopyPixelTrait) != 0)
        {
          SetPixelChannel(threshold_image,channel,p[center+i],q);
          continue;
        }
      pixels=p;
      channel_bias[channel]=0.0;
      channel_sum[channel]=0.0;
      for (v=0; v < (ssize_t) height; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          if (u == (ssize_t) (width-1))
            channel_bias[channel]+=pixels[i];  /* rightmost column */
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image);
        }
        pixels+=GetPixelChannels(image)*image->columns;
      }
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          mean;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (threshold_traits == UndefinedPixelTrait))
          continue;
        if ((threshold_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(threshold_image,channel,p[center+i],q);
            continue;
          }
        /*
          Slide the window one pixel right: subtract the stale column
          (channel_bias), then record the window's current leftmost column
          as the next stale column and add the incoming rightmost column.
        */
        channel_sum[channel]-=channel_bias[channel];
        channel_bias[channel]=0.0;
        pixels=p;
        for (v=0; v < (ssize_t) height; v++)
        {
          channel_bias[channel]+=pixels[i];
          pixels+=(width-1)*GetPixelChannels(image);
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image)*(image->columns+1);
        }
        /*
          Local mean plus the caller's bias; note "bias" here is the function
          parameter, distinct from channel_bias above.
        */
        mean=(double) (channel_sum[channel]/number_pixels+bias);
        SetPixelChannel(threshold_image,channel,(Quantum) ((double)
          p[center+i] <= mean ? 0 : QuantumRange),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(threshold_image);
    }
    if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_image->type=image->type;
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoThresholdImage() automatically selects a threshold and replaces each
% pixel in the image with a black pixel if the image intensity is less than
% the selected threshold otherwise white.
%
% The format of the AutoThresholdImage method is:
%
% MagickBooleanType AutoThresholdImage(Image *image,
% const AutoThresholdMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-threshold.
%
% o method: choose from Kapur, OTSU, or Triangle.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity  255

  double
    *back_entropy,
    *cdf,
    epsilon,
    *fore_entropy,
    max_entropy,
    partial;

  register ssize_t
    i,
    j;

  size_t
    threshold;

  /*
    Kapur's method: select the threshold that maximizes the combined
    entropy of the black (at or below) and white (above) histogram
    partitions.
  */
  cdf=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*cdf));
  back_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*back_entropy));
  fore_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*fore_entropy));
  if ((cdf == (double *) NULL) || (back_entropy == (double *) NULL) ||
      (fore_entropy == (double *) NULL))
    {
      /*
        Release whichever buffers were acquired before the failure.
      */
      if (fore_entropy != (double *) NULL)
        fore_entropy=(double *) RelinquishMagickMemory(fore_entropy);
      if (back_entropy != (double *) NULL)
        back_entropy=(double *) RelinquishMagickMemory(back_entropy);
      if (cdf != (double *) NULL)
        cdf=(double *) RelinquishMagickMemory(cdf);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);  /* negative threshold signals failure to the caller */
    }
  /*
    Cumulative histogram (discrete CDF) over the 256 intensity bins.
  */
  cdf[0]=histogram[0];
  for (i=1; i <= MaxIntensity; i++)
    cdf[i]=cdf[i-1]+histogram[i];
  epsilon=MagickMinimumValue;
  for (j=0; j <= MaxIntensity; j++)
  {
    /*
      Entropy of the bins at or below candidate threshold j.
    */
    back_entropy[j]=0.0;
    if (cdf[j] > epsilon)
      {
        partial=0.0;
        for (i=0; i <= j; i++)
          if (histogram[i] > epsilon)
            partial-=histogram[i]/cdf[j]*log(histogram[i]/cdf[j]);
        back_entropy[j]=partial;
      }
    /*
      Entropy of the bins above candidate threshold j.
    */
    fore_entropy[j]=0.0;
    if ((1.0-cdf[j]) > epsilon)
      {
        partial=0.0;
        for (i=j+1; i <= MaxIntensity; i++)
          if (histogram[i] > epsilon)
            partial-=histogram[i]/(1.0-cdf[j])*
              log(histogram[i]/(1.0-cdf[j]));
        fore_entropy[j]=partial;
      }
  }
  /*
    Keep the bin whose combined entropy is largest.
  */
  max_entropy=back_entropy[0]+fore_entropy[0];
  threshold=0;
  for (j=1; j <= MaxIntensity; j++)
    if ((back_entropy[j]+fore_entropy[j]) > max_entropy)
      {
        max_entropy=back_entropy[j]+fore_entropy[j];
        threshold=(size_t) j;
      }
  fore_entropy=(double *) RelinquishMagickMemory(fore_entropy);
  back_entropy=(double *) RelinquishMagickMemory(back_entropy);
  cdf=(double *) RelinquishMagickMemory(cdf);
  return(100.0*threshold/MaxIntensity);  /* threshold as a percentage */
}
static double OTSUThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    *mean,
    peak_sigma,
    *probability,
    *sigma,
    threshold,
    *weight;

  register ssize_t
    i;

  /*
    Otsu's method: choose the threshold that maximizes the inter-class
    variance between the two populations the split creates.
  */
  mean=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*mean));
  weight=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*weight));
  probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*probability));
  sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
  if ((mean == (double *) NULL) || (weight == (double *) NULL) ||
      (probability == (double *) NULL) || (sigma == (double *) NULL))
    {
      /*
        Free whichever allocations succeeded, then report the failure.
      */
      if (sigma != (double *) NULL)
        sigma=(double *) RelinquishMagickMemory(sigma);
      if (probability != (double *) NULL)
        probability=(double *) RelinquishMagickMemory(probability);
      if (weight != (double *) NULL)
        weight=(double *) RelinquishMagickMemory(weight);
      if (mean != (double *) NULL)
        mean=(double *) RelinquishMagickMemory(mean);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);  /* negative threshold signals failure to the caller */
    }
  /*
    Copy the (already normalized) histogram as the probability density.
  */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    probability[i]=histogram[i];
  /*
    Running zeroth moment (weight) and first moment (mean) of the density.
  */
  weight[0]=probability[0];
  mean[0]=0.0;
  for (i=1; i <= (ssize_t) MaxIntensity; i++)
  {
    weight[i]=weight[i-1]+probability[i];
    mean[i]=mean[i-1]+i*probability[i];
  }
  /*
    Evaluate the inter-class variance at each candidate split and keep the
    maximizer.
  */
  threshold=0;
  peak_sigma=0.0;
  for (i=0; i < (ssize_t) MaxIntensity; i++)
  {
    sigma[i]=0.0;
    if ((weight[i] != 0.0) && (weight[i] != 1.0))
      sigma[i]=pow(mean[MaxIntensity]*weight[i]-mean[i],2.0)/(weight[i]*(1.0-
        weight[i]));
    if (sigma[i] > peak_sigma)
      {
        peak_sigma=sigma[i];
        threshold=(double) i;
      }
  }
  mean=(double *) RelinquishMagickMemory(mean);
  weight=(double *) RelinquishMagickMemory(weight);
  probability=(double *) RelinquishMagickMemory(probability);
  sigma=(double *) RelinquishMagickMemory(sigma);
  return(100.0*threshold/MaxIntensity);  /* threshold as a percentage */
}
static double TriangleThreshold(const double *histogram,
  ExceptionInfo *exception)
{
  double
    alpha,
    beta,
    count,
    delta,
    far_x,
    max_delta,
    norm,
    offset,
    peak_x,
    peak_y,
    signed_distance;

  register ssize_t
    i;

  ssize_t
    end,
    max,
    start,
    threshold;

  /*
    Triangle algorithm: draw a line from the histogram peak to the far end
    of the populated range and threshold at the bin farthest from that
    line.
  */
  (void) exception;
  start=0;  /* first populated bin */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > 0.0)
      {
        start=i;
        break;
      }
  end=0;  /* last populated bin */
  for (i=(ssize_t) MaxIntensity; i >= 0; i--)
    if (histogram[i] > 0.0)
      {
        end=i;
        break;
      }
  max=0;  /* bin with the largest count (the peak) */
  count=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > count)
      {
        max=i;
        count=histogram[i];
      }
  /*
    Implicit line alpha*x+beta*y+offset = 0 from the peak toward the longer
    tail of the histogram.
  */
  peak_x=(double) max;
  peak_y=histogram[max];
  far_x=(double) end;
  if ((max-start) >= (end-max))
    far_x=(double) start;
  alpha=peak_y-0.0;
  beta=far_x-peak_x;
  offset=(-1.0)*(alpha*peak_x+beta*peak_y);
  /*
    Uniform scale applied to all signed distances; it does not change which
    bin maximizes the distance.
  */
  norm=1.0/sqrt(alpha*alpha+beta*beta+offset*offset);
  threshold=0;
  max_delta=0.0;
  if (far_x == (double) start)
    for (i=start; i < max; i++)
    {
      signed_distance=norm*(alpha*i+beta*histogram[i]+offset);
      delta=sqrt(signed_distance*signed_distance);
      if ((delta > max_delta) && (signed_distance > 0.0))
        {
          threshold=i;
          max_delta=delta;
        }
    }
  else
    for (i=end; i > max; i--)
    {
      signed_distance=norm*(alpha*i+beta*histogram[i]+offset);
      delta=sqrt(signed_distance*signed_distance);
      if ((delta > max_delta) && (signed_distance < 0.0))
        {
          threshold=i;
          max_delta=delta;
        }
    }
  return(100.0*threshold/MaxIntensity);  /* threshold as a percentage */
}
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    *histogram,
    scale,
    threshold,
    total;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Accumulate a 256-bin intensity histogram of the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize the histogram so the bins form a probability density.
  */
  total=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    total+=histogram[i];
  scale=PerceptibleReciprocal(total);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=scale*histogram[i];
  /*
    Select a threshold with the requested method (OTSU when unspecified).
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  if (threshold < 0.0)
    status=MagickFalse;  /* a negative threshold signals a failed method */
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Record the chosen threshold as an image property, then binarize.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
  return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than that given is set to its maximum or QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImage method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
% o exception: return any errors or warnings in this structure.
%
% Aside: You can get the same results as operator using LevelImages()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image: channels at or below the threshold become 0,
    all other channels become QuantumRange.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          With a restricted channel mask threshold each channel on its own
          value; otherwise threshold every channel on the pixel intensity.
        */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          Pass progress without a second increment: the former progress++
          in this call both over-counted rows and raced with the atomic
          update above (the sibling threshold loops all pass plain
          progress).
        */
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A NULL threshold string is a no-op, not an error.
  */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the threshold geometry: rho seeds every channel, and sigma/xi/psi
    override green/blue/alpha when present (psi/chi map to black/alpha for
    CMYK).  Alpha defaults to 100 before any percent scaling.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /*
        For CMYK the 4th value (psi) is black and the 5th (chi) is alpha.
      */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /*
        A trailing '%' scales the values from percentages of QuantumRange
        to absolute quantum values.
      */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image: channels below the threshold are forced to
    black; channels at or above it are left unchanged.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          With a restricted channel mask compare each channel against its
          own value; otherwise compare the pixel intensity.
        */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampImage() sets each pixel whose value is below zero to zero and any
% pixel whose value is above the quantum range to the quantum range (e.g.
% 65535) otherwise the pixel value remains unchanged.
%
% The format of the ClampImage method is:
%
% MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Palette image: clamping the colormap entries is sufficient.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        image->colormap[i].red=(double) ClampPixel(image->colormap[i].red);
        image->colormap[i].green=(double) ClampPixel(image->colormap[i].green);
        image->colormap[i].blue=(double) ClampPixel(image->colormap[i].blue);
        image->colormap[i].alpha=(double) ClampPixel(image->colormap[i].alpha);
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp every updatable channel of every pixel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) != 0)
          q[i]=ClampPixel((MagickRealType) q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocates the given ThresholdMap.
%
% The format of the DestroyThresholdMap method is:
%
% ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  /*
    Release the threshold map's level matrix, its strings, and finally the
    map structure itself; the frees are independent of one another.
  */
  assert(map != (ThresholdMap *) NULL);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  map=(ThresholdMap *) RelinquishMagickMemory(map);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for the
% map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  /*
    Try the built-in minimal map list first, then fall back to the
    configured threshold map files (unless zero-configuration builds
    disable them).
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  {
    const StringInfo
      *option;

    LinkedListInfo
      *options;

    options=GetConfigureOptions(ThresholdsFilename,exception);
    for (option=(const StringInfo *) GetNextValueInLinkedList(options);
         option != (const StringInfo *) NULL;
         option=(const StringInfo *) GetNextValueInLinkedList(options))
    {
      map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
        GetStringInfoPath(option),map_id,exception);
      if (map != (ThresholdMap *) NULL)
        break;
    }
    options=DestroyConfigureOptions(options);
  }
#endif
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() look for a given threshold map name or alias in the
% given XML file data, and return the allocated the map when found.
%
% The format of the GetThresholdMapFile method is:
%
% ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
% const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  /*
    Parse the XML threshold-map list and return a newly allocated
    ThresholdMap for the entry whose "map" or "alias" attribute matches
    map_id; return NULL when no entry matches or the entry is malformed.
  */
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  /*
    Locate the <threshold> element matching map_id by map or alias.
  */
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      /* no match: not an error, the caller may try other map files */
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    A matching map must supply both a <description> and a <levels> element.
  */
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Allocate the map and copy its identity strings; members are NULLed
    first so DestroyThresholdMap() is safe on any error path below.
  */
  map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  /*
    Validate the levels geometry: width and height must be non-zero.
  */
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    The divisor normalizes the level values; it must be at least 2.
  */
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Parse exactly width*height whitespace-separated level values, each in
    the inclusive range [0, divisor].
  */
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        /* strtol consumed nothing: the level list ended too early */
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /*
    Any additional parsable value means the list was longer than
    width*height; reject the map.
  */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *description_text,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  /*
    Emit a Map/Alias/Description table for every <threshold> element in
    the given XML map list.
  */
  assert(xml != (char *) NULL);
  assert(file != (FILE *) NULL);
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");  /* alias is optional */
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    description_text=GetXMLTreeContent(description);
    if (description_text == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      description_text);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  /*
    List every configured threshold-map file on the given FILE (stdout when
    no FILE is supplied); status accumulates across files.
  */
  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedDitherImage() will perform an ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels to dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
% For example: "o3x3,6" will generate a 6 level posterization of the
% image with an ordered 3x3 diffused pixel dither being applied between
% each level. While checker,8,8,4 will produce a 332 colormapped image
% with only a single checkerboard hash pattern (50% grey) between each
% color level, to basically double the number of color levels with
% a bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  const char
    *p;

  double
    levels[CompositePixelChannel];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  ThresholdMap
    *map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  /*
    Skip leading spaces/commas, then copy the map name into token.
  */
  p=(char *) threshold_map;
  while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
    (*p != '\0'))
    p++;
  threshold_map=p;
  while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
    (*p != '\0'))
  {
    if ((p-threshold_map) >= (MagickPathExtent-1))
      break;  /* guard against overflowing token */
    token[p-threshold_map]=(*p);
    p++;
  }
  token[p-threshold_map]='\0';
  map=GetThresholdMap(token,exception);
  if (map == (ThresholdMap *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  /*
    Parse optional per-channel level counts; default is 2 (bitmap dither).
    A single number applies to all channels; further numbers are applied
    to channels in turn.
  */
  for (i=0; i < MaxPixelChannels; i++)
    levels[i]=2.0;
  p=strchr((char *) threshold_map,',');
  if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
    {
      GetNextToken(p,&p,MagickPathExtent,token);
      for (i=0; (i < MaxPixelChannels); i++)
        levels[i]=StringToDouble(token,(char **) NULL);
      for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++)
      {
        GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          GetNextToken(p,&p,MagickPathExtent,token);
        levels[i]=StringToDouble(token,(char **) NULL);
      }
    }
  for (i=0; i < MaxPixelChannels; i++)
    if (fabs(levels[i]) >= 1)
      levels[i]-=1.0;  /* convert level count to interval count */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Dither image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      ssize_t
        n;

      n=0;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        ssize_t
          level,
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (fabs(levels[n]) < MagickEpsilon)
          {
            n++;  /* a level of 0 disables dithering for this channel */
            continue;
          }
        /*
          Posterize to levels[n] intervals; the threshold-map entry decides
          whether this pixel rounds up or down within its interval.
        */
        threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1));
        level=threshold/(map->divisor-1);
        threshold-=level*(map->divisor-1);
        q[i]=ClampToQuantum((double) (level+(threshold >=
          map->levels[(x % map->width)+map->width*(y % map->height)]))*
          QuantumRange/levels[n]);
        n++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,DitherImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  map=DestroyThresholdMap(map);
  /*
    FIX: previously returned MagickTrue unconditionally, hiding pixel-cache
    acquisition/sync failures recorded in status (all sibling functions in
    this file return status).
  */
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() set each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
% unchanged.
%
% The format of the PerceptibleImage method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
const double epsilon)
{
double
sign;
sign=(double) quantum < 0.0 ? -1.0 : 1.0;
if ((sign*quantum) >= epsilon)
return(quantum);
return((Quantum) (sign*epsilon));
}
MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      /*
        Palette image: thresholding the colormap entries suffices;
        SyncImage() then propagates the updated colormap to the pixels.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image: apply the threshold to every channel of every pixel,
    one cache-view row per OpenMP iteration.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;  /* channel carries no data for this image */
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PerceptibleImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;  /* monitor requested cancellation */
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two color image.
%
% The format of the RandomThresholdImage method is:
%
% MagickBooleanType RandomThresholdImage(Image *image,
% const double min_threshold,const double max_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o min_threshold, max_threshold: Specify the low and high thresholds.
% These values range from 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const double min_threshold,const double max_threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Random threshold image: each channel value is compared against a random
    threshold clamped to [min_threshold,max_threshold].  (The previously
    declared PixelInfo threshold and its GetPixelInfo() initialization were
    dead code, shadowed by the per-channel double below, and are removed.)
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Values outside [min,max] use the nearer bound as their threshold;
          values inside get a fresh pseudo-random threshold per channel.
        */
        if ((double) q[i] < min_threshold)
          threshold=min_threshold;
        else
          if ((double) q[i] > max_threshold)
            threshold=max_threshold;
          else
            threshold=(double) (QuantumRange*
              GetPseudoRandomValue(random_info[id]));
        q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n g e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RangeThresholdImage() applies soft and hard thresholding.
%
% The format of the RangeThresholdImage method is:
%
% MagickBooleanType RangeThresholdImage(Image *image,
% const double low_black,const double low_white,const double high_white,
% const double high_black,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low_black: Define the minimum black threshold value.
%
% o low_white: Define the minimum white threshold value.
%
% o high_white: Define the maximum white threshold value.
%
% o high_black: Define the maximum black threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RangeThresholdImage(Image *image,
  const double low_black,const double low_white,const double high_white,
  const double high_black,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Range threshold image: map each value through a trapezoid — ramp up over
    [low_black,low_white), full white on [low_white,high_white], ramp down
    over (high_white,high_black], black outside.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /* default comparand is the pixel's overall intensity ... */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* ... unless a channel subset is selected: then threshold each
           selected channel on its own value */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel < low_black)
          q[i]=0;
        else
          if ((pixel >= low_black) && (pixel < low_white))
            q[i]=ClampToQuantum(QuantumRange*
              PerceptibleReciprocal(low_white-low_black)*(pixel-low_black));
          else
            if ((pixel >= low_white) && (pixel <= high_white))
              q[i]=QuantumRange;
            else
              if ((pixel > high_white) && (pixel <= high_black))
                q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal(
                  high_black-high_white)*(high_black-pixel));
              else
                if (pixel > high_black)
                  q[i]=0;
                else
                  q[i]=0;  /* unreachable; kept as defensive default */
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);  /* nothing requested: no-op success */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the geometry string into per-channel thresholds: rho seeds every
    channel; sigma/xi/psi(/chi) override green/blue/alpha(/black) when given.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /* CMYK: the 4th value is black and the 5th is alpha */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /* percentages: scale from 0-100 to the quantum range */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    White threshold image: any channel value above its threshold is forced
    to QuantumRange; values at or below are left unchanged.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /* compare intensity by default; per-channel value when a channel
         subset is selected */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel > GetPixelInfoChannel(&threshold,channel))
          q[i]=QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
main.c | //====================================================================================================100
// UPDATE
//====================================================================================================100
// 2006.03 Rob Janiczek
// --creation of prototype version
// 2006.03 Drew Gilliam
// --rewriting of prototype version into current version
// --got rid of multiple function calls, all code in a
// single function (for speed)
// --code cleanup & commenting
// --code optimization efforts
// 2006.04 Drew Gilliam
// --added diffusion coefficient saturation on [0,1]
// 2009.12 Lukasz G. Szafaryn
// -- reading from image, command line inputs
// 2010.01 Lukasz G. Szafaryn
// --comments
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "define.c"
#include "graphics.c"
#include "resize.c"
#include "timer.c"
#include "BenchmarksUtil.h"
#include <sys/time.h>
#define ERROR_THRESHOLD 0.05
// Count the pixels whose CPU and GPU results differ by more than
// ERROR_THRESHOLD percent, report the count to stdout, and return it.
int compareResults(fp *image, fp *image_cpu, int Ne) {
  int idx;
  int mismatch_count = 0;
  for (idx = 0; idx < Ne; idx++)
    if (percentDiff(image[idx], image_cpu[idx]) > ERROR_THRESHOLD)
      mismatch_count++;
  // print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, mismatch_count);
  return mismatch_count;
}
//====================================================================================================100
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
//====================================================================================================100
int main(int argc, char *argv[]) {
//================================================================================80
// VARIABLES
//================================================================================80
// time
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
long long time7;
long long time8;
long long time9;
long long time10;
double t_start, t_end, t_gpu, t_cpu;
time0 = get_time();
// inputs image, input parameters
fp *image_ori; // originalinput image
int image_ori_rows;
int image_ori_cols;
long image_ori_elem;
// inputs image, input parameters
fp *image, *image_cpu; // input image
int Nr, Nc; // IMAGE nbr of rows/cols/elements
int Ne;
// algorithm parameters
int niter; // nbr of iterations
fp lambda; // update step size
// size of IMAGE
int r1, r2, c1, c2; // row/col coordinates of uniform ROI
long NeROI; // ROI nbr of elements
// ROI statistics
fp meanROI, varROI, q0sqr; // local region statistics
// surrounding pixel indicies
int *iN, *iS, *jE, *jW;
// center pixel value
fp Jc;
// directional derivatives
fp *dN, *dS, *dW, *dE;
// calculation variables
fp tmp, sum, sum2;
fp G2, L, num, den, qsqr, D;
// diffusion coefficient
fp *c;
fp cN, cS, cW, cE;
// counters
int iter; // primary loop
int i, j; // image row/col
int k; // image single index
// number of threads
int threads;
time1 = get_time();
//================================================================================80
// GET INPUT PARAMETERS
//================================================================================80
if (argc != 6) {
printf("ERROR: wrong number of arguments\n");
return 0;
} else {
niter = atoi(argv[1]);
lambda = atof(argv[2]);
Nr = atoi(argv[3]); // it is 502 in the original image
Nc = atoi(argv[4]); // it is 458 in the original image
threads = atoi(argv[5]);
}
// omp_set_num_threads(threads);
// printf("THREAD %d\n", omp_get_thread_num());
// printf("NUMBER OF THREADS: %d\n", omp_get_num_threads());
time2 = get_time();
//================================================================================80
// READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN)
//================================================================================80
// read image
image_ori_rows = 502;
image_ori_cols = 458;
image_ori_elem = image_ori_rows * image_ori_cols;
image_ori = (fp *)malloc(sizeof(fp) * image_ori_elem);
read_graphics("../input/image.pgm", image_ori, image_ori_rows, image_ori_cols,
1);
time3 = get_time();
//================================================================================80
// RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig)
//================================================================================80
Ne = Nr * Nc;
image = (fp *)malloc(sizeof(fp) * Ne);
image_cpu = (fp *)malloc(sizeof(fp) * Ne);
resize(image_ori, image_ori_rows, image_ori_cols, image, image_cpu, Nr, Nc,
1);
time4 = get_time();
//================================================================================80
// SETUP
//================================================================================80
r1 = 0; // top row index of ROI
r2 = Nr - 1; // bottom row index of ROI
c1 = 0; // left column index of ROI
c2 = Nc - 1; // right column index of ROI
// ROI image size
NeROI = (r2 - r1 + 1) * (c2 - c1 + 1); // number of elements in ROI, ROI size
// allocate variables for surrounding pixels
iN = malloc(sizeof(int *) * Nr); // north surrounding element
iS = malloc(sizeof(int *) * Nr); // south surrounding element
jW = malloc(sizeof(int *) * Nc); // west surrounding element
jE = malloc(sizeof(int *) * Nc); // east surrounding element
// allocate variables for directional derivatives
dN = malloc(sizeof(fp) * Ne); // north direction derivative
dS = malloc(sizeof(fp) * Ne); // south direction derivative
dW = malloc(sizeof(fp) * Ne); // west direction derivative
dE = malloc(sizeof(fp) * Ne); // east direction derivative
// allocate variable for diffusion coefficient
c = malloc(sizeof(fp) * Ne); // diffusion coefficient
// N/S/W/E indices of surrounding pixels (every element of IMAGE)
// #pragma omp parallel
for (i = 0; i < Nr; i++) {
iN[i] = i - 1; // holds index of IMAGE row above
iS[i] = i + 1; // holds index of IMAGE row below
}
// #pragma omp parallel
for (j = 0; j < Nc; j++) {
jW[j] = j - 1; // holds index of IMAGE column on the left
jE[j] = j + 1; // holds index of IMAGE column on the right
}
// N/S/W/E boundary conditions, fix surrounding indices outside boundary of
// IMAGE
iN[0] = 0; // changes IMAGE top row index from -1 to 0
iS[Nr - 1] = Nr - 1; // changes IMAGE bottom row index from Nr to Nr-1
jW[0] = 0; // changes IMAGE leftmost column index from -1 to 0
jE[Nc - 1] = Nc - 1; // changes IMAGE rightmost column index from Nc to Nc-1
time5 = get_time();
//================================================================================80
// SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT
//================================================================================80
// #pragma omp parallel
for (i = 0; i < Ne; i++) { // do for the number of elements in input IMAGE
image[i] = exp(image[i] /
255); // exponentiate input IMAGE and copy to output image
image_cpu[i] =
exp(image_cpu[i] /
255); // exponentiate input IMAGE and copy to output image
}
time6 = get_time();
//================================================================================80
// COMPUTATION
//================================================================================80
// printf("iterations: ");
// primary loop
// CPU
t_start = rtclock();
for (iter = 0; iter < niter;
iter++) { // do for the number of iterations input parameter
// ROI statistics for entire ROI (single number for ROI)
sum = 0;
sum2 = 0;
for (i = r1; i <= r2; i++) { // do for the range of rows in ROI
for (j = c1; j <= c2; j++) { // do for the range of columns in ROI
tmp = image_cpu[i + Nr * j]; // get coresponding value in IMAGE
sum += tmp; // take corresponding value and add to sum
sum2 += tmp * tmp; // take square of corresponding value and add to sum2
}
}
meanROI = sum / NeROI; // gets mean (average) value of element in ROI
varROI = (sum2 / NeROI) - meanROI * meanROI; // gets variance of ROI
q0sqr = varROI / (meanROI * meanROI); // gets standard deviation of ROI
// directional derivatives, ICOV, diffusion coefficent
for (j = 0; j < Nc; j++) { // do for the range of columns in IMAGE
for (i = 0; i < Nr; i++) { // do for the range of rows in IMAGE
// current index/pixel
k = i + Nr * j; // get position of current element
Jc = image_cpu[k]; // get value of the current element
// directional derivates (every element of IMAGE)
dN[k] = image_cpu[iN[i] + Nr * j] - Jc; // north direction derivative
dS[k] = image_cpu[iS[i] + Nr * j] - Jc; // south direction derivative
dW[k] = image_cpu[i + Nr * jW[j]] - Jc; // west direction derivative
dE[k] = image_cpu[i + Nr * jE[j]] - Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
G2 = (dN[k] * dN[k] + dS[k] * dS[k] // gradient (based on derivatives)
+ dW[k] * dW[k] + dE[k] * dE[k]) /
(Jc * Jc);
// normalized discrete laplacian (equ 54)
L = (dN[k] + dS[k] + dW[k] + dE[k]) /
Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
num = (0.5 * G2) -
((1.0 / 16.0) * (L * L)); // num (based on gradient and laplacian)
den = 1 + (.25 * L); // den (based on laplacian)
qsqr = num / (den * den); // qsqr (based on num and den)
// diffusion coefficent (equ 33) (every element of IMAGE)
den = (qsqr - q0sqr) /
(q0sqr * (1 + q0sqr)); // den (based on qsqr and q0sqr)
c[k] = 1.0 / (1.0 + den); // diffusion coefficient (based on den)
// saturate diffusion coefficent to 0-1 range
if (c[k] < 0) // if diffusion coefficient < 0
{
c[k] = 0; // ... set to 0
} else if (c[k] > 1) // if diffusion coefficient > 1
{
c[k] = 1; // ... set to 1
}
}
}
// divergence & image update
for (j = 0; j < Nc; j++) { // do for the range of columns in IMAGE
// printf("NUMBER OF THREADS: %d\n", omp_get_num_threads());
for (i = 0; i < Nr; i++) { // do for the range of rows in IMAGE
// current index
k = i + Nr * j; // get position of current element
// diffusion coefficent
cN = c[k]; // north diffusion coefficient
cS = c[iS[i] + Nr * j]; // south diffusion coefficient
cW = c[k]; // west diffusion coefficient
cE = c[i + Nr * jE[j]]; // east diffusion coefficient
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k]; // divergence
// image update (equ 61) (every element of IMAGE)
image_cpu[k] =
image_cpu[k] +
0.25 * lambda *
D; // updates image (based on input time step and divergence)
}
}
}
t_end = rtclock();
t_cpu = t_end - t_start;
// GPU
t_start = rtclock();
#pragma omp target map( \
to : iN[ : Nr], iS[ : Nr], \
jW[ : Nc], jE[ : Nc]) map( \
tofrom : dN[ : Ne], \
dS[ : Ne], \
dW[ : Ne], \
dE[ : Ne], \
c[ : Ne], image[ : Ne]) \
device(DEVICE_ID)
{
for (iter = 0; iter < niter;
iter++) { // do for the number of iterations input parameter
// ROI statistics for entire ROI (single number for ROI)
sum = 0;
sum2 = 0;
for (i = r1; i <= r2; i++) { // do for the range of rows in ROI
for (j = c1; j <= c2; j++) { // do for the range of columns in ROI
tmp = image[i + Nr * j]; // get coresponding value in IMAGE
sum += tmp; // take corresponding value and add to sum
sum2 +=
tmp * tmp; // take square of corresponding value and add to sum2
}
}
meanROI = sum / NeROI; // gets mean (average) value of element in ROI
varROI = (sum2 / NeROI) - meanROI * meanROI; // gets variance of ROI
q0sqr = varROI / (meanROI * meanROI); // gets standard deviation of ROI
// directional derivatives, ICOV, diffusion coefficent
// #pragma omp target device(DEVICE_ID)
// #pragma omp target map(to: iN[:Nr], iS[:Nr], jW[:Nc], jE[:Nc], image[:Ne] ) \
// map(tofrom: dN[:Ne], dS[:Ne], dW[:Ne], dE[:Ne], c[:Ne])
// {
#pragma omp parallel for collapse(1)
for (j = 0; j < Nc; j++) { // do for the range of columns in IMAGE
for (i = 0; i < Nr; i++) { // do for the range of rows in IMAGE
// current index/pixel
k = i + Nr * j; // get position of current element
Jc = image[k]; // get value of the current element
// directional derivates (every element of IMAGE)
dN[k] = image[iN[i] + Nr * j] - Jc; // north direction derivative
dS[k] = image[iS[i] + Nr * j] - Jc; // south direction derivative
dW[k] = image[i + Nr * jW[j]] - Jc; // west direction derivative
dE[k] = image[i + Nr * jE[j]] - Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
G2 = (dN[k] * dN[k] + dS[k] * dS[k] // gradient (based on derivatives)
+ dW[k] * dW[k] + dE[k] * dE[k]) /
(Jc * Jc);
// normalized discrete laplacian (equ 54)
L = (dN[k] + dS[k] + dW[k] + dE[k]) /
Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
num = (0.5 * G2) - ((1.0 / 16.0) *
(L * L)); // num (based on gradient and laplacian)
den = 1 + (.25 * L); // den (based on laplacian)
qsqr = num / (den * den); // qsqr (based on num and den)
// diffusion coefficient (equ 33) (every element of IMAGE)
den = (qsqr - q0sqr) /
(q0sqr * (1 + q0sqr)); // den (based on qsqr and q0sqr)
c[k] = 1.0 / (1.0 + den); // diffusion coefficient (based on den)
// saturate diffusion coefficient to 0-1 range
if (c[k] < 0) // if diffusion coefficient < 0
{
c[k] = 0; // ... set to 0
} else if (c[k] > 1) // if diffusion coefficient > 1
{
c[k] = 1; // ... set to 1
}
}
}
// }
// divergence & image update
// #pragma omp target device(DEVICE_ID)
// #pragma omp target map(to: c[:Ne], iS[:Nr], jE[:Nc], dN[:Ne], dS[:Ne], dW[:Ne], dE[:Ne] ) \
map(tofrom: image[:Ne])
// {
#pragma omp parallel for collapse(1)
for (j = 0; j < Nc; j++) { // do for the range of columns in IMAGE
// printf("NUMBER OF THREADS: %d\n", omp_get_num_threads());
for (i = 0; i < Nr; i++) { // do for the range of rows in IMAGE
// current index
k = i + Nr * j; // get position of current element
// diffusion coefficient
cN = c[k]; // north diffusion coefficient
cS = c[iS[i] + Nr * j]; // south diffusion coefficient
cW = c[k]; // west diffusion coefficient
cE = c[i + Nr * jE[j]]; // east diffusion coefficient
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k]; // divergence
// image update (equ 61) (every element of IMAGE)
image[k] =
image[k] +
0.25 * lambda *
D; // updates image (based on input time step and divergence)
}
}
// }
}
}
t_end = rtclock();
t_gpu = t_end - t_start;
// printf("\n");
time7 = get_time();
//================================================================================80
// SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS
//================================================================================80
// #pragma omp parallel
for (i = 0; i < Ne; i++) { // do for the number of elements in IMAGE
image[i] = log(image[i]) * 255; // take logarithm of image, log compress
image_cpu[i] =
log(image_cpu[i]) * 255; // take logarithm of image, log compress
}
time8 = get_time();
//================================================================================80
// WRITE IMAGE AFTER PROCESSING
//================================================================================80
write_graphics("image_out.pgm", image, Nr, Nc, 1, 255);
time9 = get_time();
//================================================================================80
// DEALLOCATE
//================================================================================80
free(image_ori);
free(iN);
free(iS);
free(jW);
free(jE); // deallocate surrounding pixel memory
free(dN);
free(dS);
free(dW);
free(dE); // deallocate directional derivative memory
free(c); // deallocate diffusion coefficient memory
time10 = get_time();
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%.12f s, %.12f % : SETUP VARIABLES\n",
(float)(time1 - time0) / 1000000,
(float)(time1 - time0) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : READ COMMAND LINE PARAMETERS\n",
(float)(time2 - time1) / 1000000,
(float)(time2 - time1) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : READ IMAGE FROM FILE\n",
(float)(time3 - time2) / 1000000,
(float)(time3 - time2) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : RESIZE IMAGE\n", (float)(time4 - time3) / 1000000,
(float)(time4 - time3) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : SETUP, MEMORY ALLOCATION\n",
(float)(time5 - time4) / 1000000,
(float)(time5 - time4) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : EXTRACT IMAGE\n", (float)(time6 - time5) / 1000000,
(float)(time6 - time5) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : COMPUTE\n", (float)(time7 - time6) / 1000000,
(float)(time7 - time6) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : COMPRESS IMAGE\n",
(float)(time8 - time7) / 1000000,
(float)(time8 - time7) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : SAVE IMAGE INTO FILE\n",
(float)(time9 - time8) / 1000000,
(float)(time9 - time8) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : FREE MEMORY\n", (float)(time10 - time9) / 1000000,
(float)(time10 - time9) / (float)(time10 - time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float)(time10 - time0) / 1000000);
printf("\n\n");
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_gpu);
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_cpu);
compareResults(image, image_cpu, Ne);
free(image);
free(image_cpu);
//====================================================================================================100
// END OF FILE
//====================================================================================================100
}
|
GB_unop__identity_uint8_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint8_fp64
// op(A') function: GB_unop_tran__identity_uint8_fp64
// C type: uint8_t
// A type: double
// cast: uint8_t cij = GB_cast_to_uint8_t ((double) (aij))
// unaryop: cij = aij
// type of the input matrix A
#define GB_ATYPE \
    double

// type of the output matrix C
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]: load entry pA of A into a local scalar
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// address of entry p of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator: identity (z = x; the cast below does the real work)
#define GB_OP(z, x) \
    z = x ;

// casting: convert a double to uint8_t via the GraphBLAS typecast helper
#define GB_CAST(z, aij) \
    uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;

// cij = op (aij): load one entry of A, typecast, apply, and store into C
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entrywise: Cx [p] = (uint8_t) Ax [p] for all
// p in [0, anz), typecasting each double entry to uint8_t.  The loop is
// embarrassingly parallel and is split statically across nthreads threads.
GrB_Info GB_unop_apply__identity_uint8_fp64
(
    uint8_t *Cx,            // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time; caller falls back to the
    // generic (non-specialized) kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // typecast the double entry and store it; no temporaries needed
        Cx [p] = GB_cast_to_uint8_t ((double) Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecasting its double entries to uint8_t
// and applying the identity operator.  The actual transpose loop lives in the
// included template file GB_unop_transpose.c, parameterized by the GB_* macros
// defined above.
GrB_Info GB_unop_tran__identity_uint8_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    // select phase 2 of the two-phase transpose template (the fill phase;
    // phase 1, the counting phase, is instantiated elsewhere)
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  /// Zero-initialized so that a default-constructed record (e.g. a local
  /// declaration) never carries an indeterminate value; the map lookup path
  /// value-initializes, but stack instances previously did not.
  uint8_t PointerKind = 0;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// Per-file nullability records, keyed by file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A one-entry cache holding the record for the most recently requested
  /// file ID, written back to the map on eviction.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // On a cache miss, write the currently cached record back (unless the
    // cache has never held a valid file), then load the requested entry.
    if (file != Cache.File) {
      if (!Cache.File.isInvalid())
        Map[Cache.File] = Cache.Nullability;

      Cache.File = file;
      Cache.Nullability = Map[file];
    }
    // Hand out the cached record, whether freshly loaded or already present.
    return Cache.Nullability;
  }
};
// TODO SYCL Integration header approach relies on an assumption that kernel
// lambda objects created by the host compiler and any of the device compilers
// will be identical wrt to field types, order and offsets. Some verification
// mechanism should be developed to enforce that.
// TODO FIXME SYCL Support for SYCL in FE should be refactored:
// - kernel identification and generation should be made a separate pass over
// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl +
// FunctionTemplateDecl::getSpecializations() mechanism could be used for that.
// - All SYCL stuff on Sema level should be encapsulated into a single Sema
// field
// - Move SYCL stuff into a separate header
// Represents contents of a SYCL integration header file produced by a SYCL
// device compiler and used by SYCL host compiler (via forced inclusion into
// compiled SYCL source):
// - SYCL kernel names
// - SYCL kernel parameters and offsets of corresponding actual arguments
class SYCLIntegrationHeader {
public:
  // Kind of kernel's parameters as captured by the compiler in the
  // kernel lambda or function object
  enum kernel_param_kind_t {
    kind_first,
    kind_accessor = kind_first,
    kind_std_layout,
    kind_sampler,
    kind_pointer,
    kind_last = kind_pointer
  };

public:
  SYCLIntegrationHeader(DiagnosticsEngine &Diag, bool UnnamedLambdaSupport,
                        Sema &S);

  /// Emits contents of the header into given stream.
  void emit(raw_ostream &Out);

  /// Emits contents of the header into a file with given name.
  /// Returns true/false on success/failure.
  bool emit(const StringRef &MainSrc);

  /// Signals that subsequent parameter descriptor additions will go to
  /// the kernel with given name. Starts new kernel invocation descriptor.
  void startKernel(StringRef KernelName, QualType KernelNameType,
                   StringRef KernelStableName, SourceLocation Loc);

  /// Adds a kernel parameter descriptor to current kernel invocation
  /// descriptor.
  void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset);

  /// Signals that addition of parameter descriptors to current kernel
  /// invocation descriptor has finished.
  void endKernel();

  /// Registers a specialization constant to emit info for it into the header.
  void addSpecConstant(StringRef IDName, QualType IDType);

private:
  // Kernel actual parameter descriptor.
  struct KernelParamDesc {
    // Represents a parameter kind.
    kernel_param_kind_t Kind = kind_last;
    // If Kind is kind_scalar or kind_struct, then
    // denotes parameter size in bytes (includes padding for structs)
    // If Kind is kind_accessor
    // denotes access target; possible access targets are defined in
    // access/access.hpp
    int Info = 0;
    // Offset of the captured parameter value in the lambda or function object.
    unsigned Offset = 0;
    KernelParamDesc() = default;
  };

  // Kernel invocation descriptor
  struct KernelDesc {
    /// Kernel name.
    std::string Name;
    /// Kernel name type.
    QualType NameType;
    /// Kernel name with stable lambda name mangling
    std::string StableName;
    SourceLocation KernelLocation;
    /// Descriptor of kernel actual parameters.
    SmallVector<KernelParamDesc, 8> Params;
    KernelDesc() = default;
  };

  /// Returns the latest invocation descriptor started by
  /// SYCLIntegrationHeader::startKernel, or null if no kernel has been
  /// started yet.
  KernelDesc *getCurKernelDesc() {
    return KernelDescs.empty() ? nullptr : &KernelDescs.back();
  }

  /// Emits a forward declaration for given declaration.
  void emitFwdDecl(raw_ostream &O, const Decl *D,
                   SourceLocation KernelLocation);

  /// Emits forward declarations of classes and template classes on which
  /// declaration of given type depends. See example in the comments for the
  /// implementation.
  /// \param O
  ///     stream to emit to
  /// \param T
  ///     type to emit forward declarations for
  /// \param KernelLocation
  ///     source location of the SYCL kernel function, used to emit nicer
  ///     diagnostic messages if kernel name is missing
  /// \param Emitted
  ///     a set of declarations forward declarations have been emitted for
  ///     already
  void emitForwardClassDecls(raw_ostream &O, QualType T,
                             SourceLocation KernelLocation,
                             llvm::SmallPtrSetImpl<const void *> &Emitted);

private:
  /// Keeps invocation descriptors for each kernel invocation started by
  /// SYCLIntegrationHeader::startKernel
  SmallVector<KernelDesc, 4> KernelDescs;

  using SpecConstID = std::pair<QualType, std::string>;

  /// Keeps specialization constants met in the translation unit. Maps spec
  /// constant's ID type to generated unique name. Duplicates are removed at
  /// integration header emission time.
  llvm::SmallVector<SpecConstID, 4> SpecConsts;

  /// Used for emitting diagnostics.
  DiagnosticsEngine &Diag;

  /// Whether header is generated with unnamed lambda support
  bool UnnamedLambdaSupport;

  Sema &S;
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Returns the expected type for the token starting at \p Tok, or a null
  /// QualType when no expectation was recorded for that location.
  QualType get(SourceLocation Tok) const {
    // The stored expectation applies only to the token it was recorded for.
    if (Tok != ExpectedLoc)
      return QualType();
    // An eagerly-stored type takes precedence over the deferred callback.
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // A visible previous declaration is always linked against.
  if (isVisible(Old))
    return true;

  // The old declaration is hidden; link only if the new declaration could be
  // declared in another translation unit.  See comment in below overload for
  // why it's safe to compute the linkage of the new declaration here.
  if (!New->isExternallyDeclarable())
    return false;

  assert(Old->isExternallyDeclarable() &&
         "should not have found a non-externally-declarable previous decl");
  return true;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we must to since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
  /// Section name given in the pragma.
  std::string SectionName;
  /// Whether this section entry currently holds a valid name.
  bool Valid = false;
  /// Location of the pragma that last acted on this entry.
  SourceLocation PragmaLocation;

  /// Applies a #pragma clang section set/clear action; defined out of line.
  void Act(SourceLocation PragmaLocation,
           PragmaClangSectionAction Action,
           StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
  /// One saved value on the #pragma stack, together with the label (if any)
  /// it was pushed with and the locations of the relevant pragma directives.
  struct Slot {
    llvm::StringRef StackSlotLabel;
    ValueType Value;
    SourceLocation PragmaLocation;
    SourceLocation PragmaPushLocation;
    Slot(llvm::StringRef StackSlotLabel, ValueType Value,
         SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
        : StackSlotLabel(StackSlotLabel), Value(Value),
          PragmaLocation(PragmaLocation),
          PragmaPushLocation(PragmaPushLocation) {}
  };

  /// Performs one stack action (reset/set/push/pop, per PragmaMsStackAction)
  /// at the given location; defined out of line.
  void Act(SourceLocation PragmaLocation,
           PragmaMsStackAction Action,
           llvm::StringRef StackSlotLabel,
           ValueType Value);

  // MSVC seems to add artificial slots to #pragma stacks on entering a C++
  // method body to restore the stacks on exit, so it works like this:
  //
  // struct S {
  //   #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
  //   void Method {}
  //   #pragma <name>(pop, InternalPragmaSlot)
  // };
  //
  // It works even with #pragma vtordisp, although MSVC doesn't support
  //   #pragma vtordisp(push [, id], n)
  // syntax.
  //
  // Push / pop a named sentinel slot.
  void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
    assert((Action == PSK_Push || Action == PSK_Pop) &&
           "Can only push / pop #pragma stack sentinels!");
    // Re-push (or pop) the current value under the sentinel label.
    Act(CurrentPragmaLocation, Action, Label, CurrentValue);
  }

  // Constructors.
  explicit PragmaStack(const ValueType &Default)
      : DefaultValue(Default), CurrentValue(Default) {}

  /// True when the current value differs from the default, i.e. some pragma
  /// is currently in effect.
  bool hasValue() const { return CurrentValue != DefaultValue; }

  SmallVector<Slot, 2> Stack;
  ValueType DefaultValue; // Value used for PSK_Reset action.
  ValueType CurrentValue;
  SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<unsigned> FpPragmaStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
  /// If ShouldAct is true, pushes a sentinel slot labeled \p SlotLabel onto
  /// the MS #pragma stacks; the destructor then pops it again. With ShouldAct
  /// false, construction and destruction are no-ops.
  PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
  ~PragmaStackSentinelRAII();

private:
  Sema &S;
  StringRef SlotLabel;
  bool ShouldAct; // Whether the constructor acted (and the dtor must undo).
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
/// The location of the attribute within the pragma.
SourceLocation Loc;
/// The parsed attribute to be applied to matching declarations.
ParsedAttr *Attribute;
/// Subject match rules restricting which declarations receive Attribute.
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
/// Whether this entry has matched any declaration yet (presumably used to
/// diagnose pragmas that never apply — confirm against SemaAttr).
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
/// The attributes pushed in this group.
SmallVector<PragmaAttributeEntry, 2> Entries;
};
/// Stack of active \#pragma clang attribute push groups.
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallSetVector<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
/// Cached scope object reused across function bodies to avoid reallocation.
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
/// Return the function scopes that belong to the current context: the tail
/// of FunctionScopes starting at FunctionScopesStart.
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
  // View the whole stack, then drop the entries saved for enclosing contexts.
  return llvm::makeArrayRef(FunctionScopes).slice(FunctionScopesStart);
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context. Entries before this index belong to enclosing contexts and are
/// saved/restored by ContextRAII, mirroring FunctionScopesStart.
unsigned InventedParameterInfosStart = 0;
/// Return the invented-parameter records belonging to the current context:
/// the tail of InventedParameterInfos from InventedParameterInfosStart on.
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
  // Drop the entries that were saved on behalf of enclosing contexts.
  return llvm::makeArrayRef(InventedParameterInfos)
      .slice(InventedParameterInfosStart);
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This
/// allows us to associate a raw vector type with one of the ext_vector type
/// names. This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
/// Templated function bodies whose parsing has been deferred; parsed on
/// demand via the LateTemplateParser callback below.
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
/// Cleanup callback paired with LateTemplateParser.
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
/// Opaque parser handle passed back to the callbacks above.
void *OpaqueParser;
/// Register the callbacks (and the opaque parser handle) used to lazily
/// parse templated function bodies recorded in LateParsedTemplateMap.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup,
                           void *P) {
  // The three assignments are independent; store the handle first, then the
  // callbacks that will receive it.
  OpaqueParser = P;
  LateTemplateParserCleanup = LTPCleanup;
  LateTemplateParser = LTP;
}
class DelayedDiagnostics;
/// Opaque saved state returned by DelayedDiagnostics::push/pushUndelayed.
/// Only DelayedDiagnostics itself can read the saved pool (via friendship).
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
/// Aliases used to label the two ways the state is saved (while parsing a
/// declaration, and while processing a context).
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
  /// The pool into which delayed diagnostics are currently collected,
  /// or null when diagnostics are emitted eagerly.
  sema::DelayedDiagnosticPool *CurPool;

public:
  DelayedDiagnostics() : CurPool(nullptr) {}

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Determines whether diagnostics should be delayed.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }

  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; }

  /// Enter a new scope. Access and deprecation diagnostics will be
  /// collected in this pool.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = &pool;
    return Saved;
  }

  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics. This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = nullptr;
    return Saved;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
// Null after pop() has run, which makes pop() idempotent (see below).
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
// NOTE: this member initializer has a side effect — it suspends
// diagnostic delaying for the duration of the pushed context.
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
// Restore all saved state. Safe to call explicitly before destruction;
// the SavedContext null-check makes a second call a no-op.
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
/// True if the innermost expression evaluation context is constant-evaluated
/// or the lightweight override flag above is set. Requires a non-empty
/// ExprEvalContexts stack.
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
// Pushes DC in the ctor init list, before the function scope below.
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
// Only FunctionDecls and ObjCMethodDecls are expected here.
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
/// Record a note pointing at UseLoc as the reason this body is being
/// synthesized. May be called at most once per scope.
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
// Pops everything in reverse order of the constructor; SavedContext's own
// destructor restores the declaration context last.
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
/// Maps identifiers to their visible declaration chains during name lookup
/// (see IdentifierResolver).
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
/// NOTE(review): the original comment was truncated ("This is really a");
/// this appears related to UnevaluatedAbstract contexts (e.g. the operand
/// of a SIZE operator in MS-style inline assembly) — confirm.
bool AllowAbstractFieldReference;
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// A candidate constant expression for immediate invocation; the int bit is
/// a flag packed alongside the expression pointer.
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
/// Saved copy of Sema::MaybeODRUseExprs for the enclosing context.
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
/// Expressions whose pointer dereferences may warrant -Wnoderef warnings.
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
/// Note: IsDecltype is NOT initialized here; callers set it separately.
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
/// True for the three unevaluated context kinds.
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
/// Requires a non-null MD; the kind is derived from MD's deletedness.
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
/// A SpecialMemberOverloadResult that can live in a FoldingSet, keyed by the
/// (record, special-member-kind) profile computed by the caller.
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
/// Arena allocator for Sema-owned objects that live for the whole TU.
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
/// Load the methods for Sel from the external source into MethodPool.
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {}
~FPFeaturesStateRAII() { S.CurFPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
/// Set once warnStackExhausted has fired, so the warning is emitted once.
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
// Accessors for the components Sema was constructed with / owns.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
// Forward to the base class's operator<< but return the derived type so
// streaming can continue on a SemaDiagnosticBuilder.
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  // Start the report on the DiagnosticsEngine, then hand the builder to a
  // SemaDiagnosticBuilder so template-instantiation notes get attached when
  // it is destroyed (see ~SemaDiagnosticBuilder).
  DiagnosticBuilder Builder = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(Builder, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// Try to find the macro named \p name in the expansion chain of \p loc.
/// NOTE(review): presumably updates \p loc to the macro's spelling location
/// on success -- confirm against the definition.
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
/// Get a string to suggest for a zero literal of type \p T (fix-it hints).
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
/// The kind of translation-unit fragment being acted on (see
/// ActOnEndOfTranslationUnitFragment). C++20 modules divide a translation
/// unit into these fragments.
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
// Parser callbacks for the start/end of the translation unit (and, for
// modular code, each TU fragment).
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
// Scope bookkeeping: push a new function/block/lambda scope as the parser
// enters the corresponding construct.
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
// The Sema that popped the scope; operator() (defined out-of-line) uses it
// to dispose of the FunctionScopeInfo.
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
/// Invoked by unique_ptr when the popped scope's lifetime ends.
void operator()(sema::FunctionScopeInfo *Scope) const;
};
/// Owning pointer to a popped FunctionScopeInfo; destruction is routed
/// through PoppedFunctionScopeDeleter.
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
/// Pop the innermost function scope. \p WP, \p D and \p BlockType feed the
/// analysis-based warnings machinery when provided.
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
/// Retrieve the innermost function scope, or null when no function scope
/// has been pushed yet.
sema::FunctionScopeInfo *getCurFunction() const {
if (FunctionScopes.empty())
return nullptr;
return FunctionScopes.back();
}
/// Retrieve the nearest enclosing function scope, if any.
sema::FunctionScopeInfo *getEnclosingFunction() const;
// Record flags on the current function scope.
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
/// Parser callback for each comment encountered in the source.
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
// The Build*Type family checks and constructs one specific kind of type
// from its components; implementations live in SemaType.cpp.
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
// Builders for loop attributes (SYCL/FPGA, OpenCL, and generic unrolling).
SYCLIntelFPGAIVDepAttr *
BuildSYCLIntelFPGAIVDepAttr(const AttributeCommonInfo &CI, Expr *Expr1,
Expr *Expr2);
template <typename FPGALoopAttrT>
FPGALoopAttrT *BuildSYCLIntelFPGALoopAttr(const AttributeCommonInfo &A,
Expr *E = nullptr);
LoopUnrollHintAttr *BuildLoopUnrollHintAttr(const AttributeCommonInfo &A,
Expr *E);
OpenCLUnrollHintAttr *
BuildOpenCLLoopUnrollHintAttr(const AttributeCommonInfo &A, Expr *E);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
// Read/write pipe types (OpenCL).
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
/// Compute the type of a parsed declarator; the main entry point for
/// turning declarators into types.
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
/// Determine whether evaluating \p E can throw.
CanThrowResult canThrow(const Stmt *E);
// Exception-specification resolution and compatibility checking.
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
///
/// Implementations override diagnose() to emit a diagnostic tailored to the
/// context in which an incomplete type was required (see
/// RequireCompleteType and friends below).
struct TypeDiagnoser {
  TypeDiagnoser() = default;
  /// Emit the diagnostic for type \p T at \p Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() = default;
};
// getPrintable: overloads normalizing a value into a form that can be
// streamed into a diagnostic builder (used by BoundTypeDiagnoser::emit).
// Most are identity functions; the SourceLocation/Expr/TypeLoc overloads
// yield a SourceRange.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// Relies on the implicit SourceLocation -> SourceRange conversion.
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// A TypeDiagnoser that emits diagnostic \c DiagID with a fixed sequence of
/// extra arguments, followed by the offending type.
///
/// Note: \c Args holds *references* to the constructor arguments, so those
/// arguments must outlive the diagnoser.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
/// Stream every bound argument into \p DB, in order.
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
// (Dummy array forces left-to-right pack-expansion evaluation.)
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
// The type itself is always the final diagnostic argument.
DB << T;
}
};
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
// Extra %select index (is the type sizeless?) precedes the type.
DB << T->isSizelessType() << T;
}
};
/// How strictly sizeless built-in types are treated when checking for
/// type completeness.
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
/// Shared implementation behind the RequireCompleteType overloads; returns
/// true on failure (type not complete under \p Kind). A null \p Diagnoser
/// suppresses diagnostics (see isCompleteType).
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
/// State tracked for one level of nested module parsing.
struct ModuleScope {
// Where this module scope began.
SourceLocation BeginLoc;
// The module being parsed at this level, once known.
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
// Visible-module state of the enclosing (outer) scope.
VisibleModuleSet OuterVisibleModules;
};
/// The stack of modules we're currently parsing; the innermost scope is at
/// the back of the vector.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
///
/// Returns null when no module scope is active.
Module *getCurrentModule() const {
if (!ModuleScopes.empty())
return ModuleScopes.back().Module;
return nullptr;
}
// Modules currently visible (for name-lookup visibility checks below).
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
// Fast path: most declarations are unconditionally visible; only fall
// back to the slow module-visibility walk otherwise.
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
// Fast path through isVisible(); otherwise do the slow walk, which can
// also report the modules that would make D visible via \p Modules.
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
/// Convenience overload that discards the suggested declaration.
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
/// Non-diagnosing completeness check: succeeds exactly when
/// RequireCompleteTypeImpl (with no diagnoser) would not fail.
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
// RequireCompleteType: require that \p T be complete (per \p Kind),
// diagnosing via \p Diagnoser / \p DiagID when it is not. The variadic
// overloads bind extra diagnostic arguments through BoundTypeDiagnoser.
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
// Variant that treats sizeless types as incomplete (CompleteTypeKind::Normal)
// and uses the sizeless-aware diagnoser.
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
void completeExprArrayBound(Expr *E);
// Expression-type counterparts of the RequireCompleteType family above.
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
// Sizeless-aware variant (CompleteTypeKind::Normal).
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
// Require that \p T be a literal type, with the same diagnoser-binding
// pattern as RequireCompleteType.
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
/// Wrap \p T in an ElaboratedType carrying the keyword and scope specifier.
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Communicates between the parser and Sema whether a definition's body may
/// be skipped, and if so how a redefinition should be compared against the
/// previous one.
struct SkipBodyInfo {
  SkipBodyInfo() = default;
  // Whether the body should be skipped. (Default member initializers
  // replace the old constructor mem-init list; behavior is unchanged.)
  bool ShouldSkip = false;
  // Whether the skipped definition must be checked against Previous.
  bool CheckSameAsPrevious = false;
  // The previously-seen declaration, when a comparison is requested.
  NamedDecl *Previous = nullptr;
  // The newly-parsed declaration.
  NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
/// Look up a name as a type name; the many flags tune lookup for the
/// parser's current context (class names, ctor/dtor names, template
/// deduction, etc.).
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
/// Diagnose a failed type-name lookup; may suggest a corrected type via
/// \p SuggestedType.
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName(). Discriminates the payload stored in
/// NameClassification.
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
/// The result of classifying a name (see ClassifyName): a discriminated
/// union whose active payload is determined by Kind. Construct via the
/// implicit constructors or the named factory functions; read the payload
/// only through the kind-checked accessors.
class NameClassification {
NameClassificationKind Kind;
// Payload; which member is active depends on Kind (asserted in the
// accessors below). Kinds without a payload leave the union unset.
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
// Only the fact that a keyword was found is recorded; the identifier
// itself is intentionally discarded.
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
/// Map the classification to the parser-facing TemplateNameKind; only
/// valid for template classifications.
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
// The ActOnNameClassifiedAs* callbacks below convert the corresponding
// NameClassification results into expressions (see the enumerator docs).
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
///
/// \param Dependent [out] True once the dependent-expression checks are
/// reached (note: it stays true even when the final return is false).
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
// Only meaningful in C++ and for valid expressions.
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
// Non-dependent references: plausible only if no explicit template
// arguments were already written.
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
// Dependent reference forms.
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
/// Diagnose an expression that was (per mightBeIntendedToBeTemplateName)
/// probably meant to be a template-name followed by '<...>'.
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
/// Diagnose qualifiers that were written but have no effect; the per-
/// qualifier locations let the fix-it point at each one.
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
// Shadowing diagnostics (-Wshadow and related).
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
// Declarator processing, dispatched by declarator kind (typedef, variable,
// decomposition, function); each checks the new declaration against any
// previous one found by lookup.
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
/// How CheckConstexprFunctionDefinition should report problems it finds.
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
// Diagnostics for virtual methods hidden by overloads in a derived class.
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
// Checks specific to program entry points.
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
// Parameter declarators and default arguments.
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion. Power-of-two values so
// they can be OR'd together into the 'NonTrivialKind' bitmask argument.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
/// Attach an initializer expression to a declaration.
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
/// Finish a declaration that was written without an initializer.
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
/// Mark a declaration as deleted ('= delete') / defaulted ('= default').
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
/// Finish the declarations of a K&R-style parameter list.
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
// Entry points for starting to parse a function definition body.
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Determine whether \p D is a (non-null) Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
/// Compute named-return-value-optimization candidates for \p Body.
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
// Entry points for finishing (or skipping) a function definition body.
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
/// Handle a file-scope 'asm' declaration.
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
/// The kind of module-declaration being processed.
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
/// Overload taking an already-resolved module \p M.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
// 'export { ... }' block handling.
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
// Handle a decl-specifier sequence with no declarator (e.g. 'struct S;').
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
/// How a tag name was used at its point of appearance.
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
/// Central entry point for processing a tag (struct/class/union/enum) name.
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
// Field (data member) declaration handling.
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
/// Whether the "trivial_abi" attribute participates in triviality checks.
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
/// Determine whether special member \p CSM of \p MD is trivial.
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
// Exactly one of these carries a meaningful value; the other holds its
// "none" sentinel (CXXInvalid / DefaultedComparisonKind::None). Packed
// into 8-bit bitfields to keep the type small.
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
/// True if this describes either kind of defaulted function.
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
// At most one addend is nonzero (see the field invariant above), so the
// sum yields a unique index per kind.
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
/// Determine which C++ special member function \p MD is, if any
/// (CXXInvalid otherwise).
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
  DefaultedFunctionKind DFK = getDefaultedFunctionKind(MD);
  return DFK.asSpecialMember();
}
/// Determine which kind of defaulted comparison \p FD is, if any
/// (DefaultedComparisonKind::None otherwise).
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
  DefaultedFunctionKind DFK = getDefaultedFunctionKind(FD);
  return DFK.asComparison();
}
// Objective-C instance variable (ivar) handling.
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
// Opaque token returned by ActOnTagStartSkippedDefinition and passed back
// to ActOnTagFinishSkippedDefinition.
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
// Enumeration handling.
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
// Declaration-context management.
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return the merged attribute if a new one was
/// added, otherwise nullptr (note the pointer return types below; the old
/// "Return true" wording did not match them).
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
/// Merge the attributes of a prior declaration \p Old into \p New.
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
// Redeclaration merging for the various declaration kinds.
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
/// Determine whether \p New is a valid overload of the declarations in
/// \p OldDecls.
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
/// Which 'explicit' functions may participate in a conversion.
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
/// Attempt an implicit conversion from \p From to \p ToType, producing the
/// resulting conversion sequence.
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
// Predicates for the various standard conversion categories.
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
// Initialization entry points used during overload resolution and
// copy/move initialization.
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
// NOTE(review): names suggest these suppress all diagnostics / the
// conversion-picked diagnostic respectively — confirm against the
// implementation of PerformContextualImplicitConversion.
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
/// A ContextualImplicitConverter specialized for contexts that require
/// an integral or (optionally scoped) enumeration type.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
// Whether scoped enumeration types are acceptable destinations in
// addition to integral and unscoped enumeration types.
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
// Forwards the "no match" diagnostic to the more specifically named
// diagnoseNotInt hook below.
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// The kind of Objective-C container an expression can be subscripted
/// as (array-style with an index, or dictionary-style with a key).
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
/// Classify how \p FromE may be used to subscript an ObjC container.
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
/// Classify the kind of Objective-C literal that \p FromE represents.
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
/// Convert \p From (a reference to a class member) so that it can be
/// used to access the named member, applying any necessary qualifier
/// adjustments.
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
/// Add \p Function as a candidate to \p CandidateSet for a call with
/// arguments \p Args.
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
/// Add every function in \p Functions (and, where applicable, matching
/// template specializations) as candidates to \p CandidateSet.
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
/// Add a member-function candidate found as \p FoundDecl for a call on
/// an object of type \p ObjectType.
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
/// Overload taking the concrete method and the class it is considered a
/// member of for the purpose of this call (\p ActingContext).
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
/// Add a candidate produced by deducing template arguments for a member
/// function template.
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
/// Add a candidate produced by deducing template arguments for a
/// (non-member) function template.
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
/// Check the argument conversions that do not depend on deduced template
/// parameters, populating \p Conversions.
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
/// Add a conversion function as a candidate for converting \p From to
/// \p ToType.
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
/// Template counterpart of AddConversionCandidate.
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
/// Add a surrogate candidate: a conversion to a function pointer or
/// reference (\p Proto) through which \p Object could be called.
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
/// Add the non-member functions in \p Functions as operator candidates.
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
/// Add member operator functions for operator \p Op found on the first
/// operand's class type.
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
/// Add a single built-in operator candidate with the given parameter
/// types.
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
/// Add all applicable built-in candidates for operator \p Op.
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
/// Add candidates found by argument-dependent lookup of \p Name.
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
/// Resolve an overloaded address-of expression against \p TargetType,
/// returning the selected function (or null), with the chosen
/// declaration reported through \p Found.
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
/// Resolve &f when there is exactly one viable candidate.
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
/// As above, but also rewrites \p SrcExpr in place on success.
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
/// Resolve an overload set that names a single function template
/// specialization.
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
/// As above, but also rewrites \p SrcExpr in place on success,
/// optionally complaining with the supplied diagnostic information.
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
/// Rewrite a reference to an overloaded function into a reference to
/// the specific function \p Fn.
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
/// Add the candidates named by \p ULE (including ADL results where
/// applicable) to \p CandidateSet.
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,          // begin/end call built successfully.
FRS_NoViableFunction, // No viable begin/end function was found.
FRS_DiagnosticIssued  // A diagnostic has already been emitted.
};
/// Build the begin/end call for a range-based for loop over \p Range.
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
/// Build a call where the callee is an overloaded (unresolved) name.
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
/// Populate \p CandidateSet for an overloaded call; returns true and
/// sets \p Result when no further overload resolution is required.
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
/// Build a unary operator expression that may resolve to an overloaded
/// operator function from \p Fns.
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
/// Collect the candidate operator functions for a binary operator.
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
/// Build a binary operator expression that may resolve to an overloaded
/// operator function (including rewritten candidates, e.g. for <=>).
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
/// Build the <=> comparison synthesized for a defaulted comparison.
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
/// Build Base[Idx], resolving any overloaded operator[].
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
/// Build a call whose callee is a (possibly overloaded) member function.
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
/// Build a call on an object of class type (operator() or a surrogate).
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
/// Build Base->..., resolving an overloaded operator->.
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
/// Returns the innermost enclosing scope that can contain non-field
/// declarations.
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
///
/// The kind selects which identifier namespaces are searched and which
/// declarations are hidden or ignored during lookup.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
///
/// See forRedeclarationInCurContext() for how the appropriate value is
/// chosen for the current declaration context.
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
/// Select the redeclaration-lookup mode appropriate for CurContext.
///
/// A declaration with an owning module for linkage can never link against
/// anything that is not visible, so only visible results are of interest.
/// We don't need to check linkage here; if the context has internal
/// linkage, redeclaration lookup won't find things from other TUs, and we
/// can't safely compute linkage yet in general.
RedeclarationKind forRedeclarationInCurContext() {
auto *ContextDecl = cast<Decl>(CurContext);
return ContextDecl->getOwningModuleForLinkage(/*IgnoreLinkage*/ true)
? ForVisibleRedeclaration
: ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
/// Returned by LookupLiteralOperator().
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
/// Look up the special member function (constructor, destructor,
/// assignment operator, ...) of class \p D matching the given
/// const/volatile/value-category qualifiers.
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
// Callback invoked to emit the diagnostic for a delayed typo.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
// Callback invoked to rebuild an expression from an applied typo
// correction.
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
/// Perform unqualified name lookup for C++ (scope- and context-aware).
bool CppLookupName(LookupResult &R, Scope *S);
/// Bookkeeping for a single delayed TypoExpr: the consumer that
/// produces candidate corrections plus the diagnostic/recovery
/// callbacks to invoke once a correction is chosen.
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
/// Returns the delayed-typo state recorded for \p TE in DelayedTypos.
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
/// Look up a builtin function name; returns true on success.
bool LookupBuiltin(LookupResult &R);
/// Perform unqualified name lookup starting at scope \p S.
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
/// Perform qualified name lookup into the given declaration context.
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
/// As above, with the scope specifier that named \p LookupCtx.
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
/// Perform lookup for a parsed (possibly scope-qualified) name.
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
/// Look up an Objective-C protocol declaration by name.
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
/// Look up a name in the "super" class of \p Class (MS extension).
/// NOTE(review): semantics inferred from the name; confirm at callers.
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
/// Find the non-member functions for operator \p Op applicable to
/// operand types \p T1 and \p T2, adding them to \p Functions.
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
/// Find the label with the given name, creating it if necessary.
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
/// Lookups for the C++ special member functions of \p Class.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
/// Check a literal operator id (operator"" X) for validity.
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
/// Look up a literal operator for the given argument types; the result
/// kind says which flavor (cooked/raw/template) was found.
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
/// Returns true if \p name is a recognized name.
/// NOTE(review): exact lookup scope not visible here; confirm at callers.
bool isKnownName(StringRef name);
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
/// Returned by getEmissionStatus().
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
/// Determine whether \p Decl will be emitted for the current target
/// (see FunctionEmissionStatus above).
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
/// Perform argument-dependent lookup of \p Name, collecting the found
/// functions into \p Functions.
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
/// Feed every declaration visible from scope \p S to \p Consumer
/// (used, e.g., for code completion and typo correction).
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
/// As above, but enumerating the declarations visible in context \p Ctx.
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
/// Whether CorrectTypo is being used for error recovery.
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
/// Try to "correct" a typo in the source code by finding visible
/// declarations whose names are similar to \p Typo, returning the best
/// correction found (if any).
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
/// Delayed variant of CorrectTypo: returns a TypoExpr placeholder whose
/// resolution (diagnostic + recovery) happens later via the given
/// callbacks (see DelayedTypos / CorrectDelayedTyposInExpr).
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload: no InitDecl, explicit filter.
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
/// ExprResult convenience overload: invalid results are passed through
/// unchanged; otherwise typo correction runs on the wrapped expression.
///
/// \param ER The result wrapping the Expr to check for TypoExprs.
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is
/// its initializer.
/// \param Filter Acceptability filter for rebuilt expressions (see the
/// primary overload above).
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
// Forward InitDecl to the Expr* overload. Previously it was silently
// dropped here, so corrections could resolve to the very variable
// whose initializer is being corrected.
return ER.isInvalid() ? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
/// Convenience overload: ExprResult input, no InitDecl, explicit filter.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
/// Emit \p TypoDiag for the given correction and, if error recovery is
/// enabled, apply the correction.
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
/// As above, also emitting \p PrevNote at the corrected declaration.
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
/// Compute the namespaces and classes associated with the argument
/// types, for argument-dependent lookup (C++ [basic.lookup.argdep]).
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
/// Remove lookup results that are not appropriate for a redeclaration
/// in the given context/scope.
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
/// Diagnose module-ownership conflicts between a new declaration and a
/// previous one; returns true on error.
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
/// Report an ambiguous lookup result to the user.
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
/// Find the Objective-C interface named \p Id, optionally applying typo
/// correction (which may update \p Id).
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
/// Create the declaration for predefined builtin function \p ID on
/// first use.
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
/// Implicitly declare a function named \p II at \p Loc (C89 implicit
/// function declaration).
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
/// Attach implicitly-known attributes (e.g. for library builtins) to
/// \p FD.
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
/// Diagnose attributes in \p D that were never applied to a declaration.
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
/// Validate a calling-convention attribute, producing the resulting
/// convention in \p CC; returns true on error.
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
/// Check that attribute argument \p ArgNum is a string literal,
/// extracting its value into \p Str; returns true on success.
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implelementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// it property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// How strictly two Objective-C method declarations' types are compared
/// (see MatchTwoMethodDeclarations).
enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record that typo correction for \p Typo failed at \p TypoLoc, and
/// return an empty correction object.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure) {
    auto &FailureLocs = TypoCorrectionFailures[Typo];
    FailureLocs.insert(TypoLoc);
  }
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance=*/true);
}
/// AddFactoryMethodToGlobalPool - Same as AddInstanceMethodToGlobalPool,
/// but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance=*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the instance method for the
/// given selector, warning if there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance=*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the factory method for the
/// given selector, warning if there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance=*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// A thin wrapper around an expression that has been turned into a full
/// expression; constructed via Sema::MakeFullExpr and friends.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) {}
  FullExprArg(Sema &actions) : E(nullptr) {}

  /// Hand the wrapped expression back out as an ExprResult.
  ExprResult release() { return E; }

  Expr *get() const { return E; }
  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};
/// Build a full expression for \p Arg, using the expression's own location
/// (or an invalid location when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return MakeFullExpr(Arg, Loc);
}
/// Build a full expression for \p Arg at location \p CC; the value is not
/// discarded.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  ExprResult Full = ActOnFinishFullExpr(Arg, CC, /*DiscardedValue=*/false);
  return FullExprArg(Full.get());
}
/// Build a full expression for \p Arg whose value is discarded.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return FullExprArg(
      ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue=*/true).get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object that enters the scope of a compound statement on
/// construction and leaves it on destruction.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

private:
  Sema &S;
};
/// An RAII helper that pops a function scope on exit, unless disabled.
struct FunctionScopeRAII {
  Sema &S;
  bool Active; // When false, the destructor is a no-op.
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  /// Disarm the RAII object so the function scope is not popped.
  void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
bool CheckRebuiltAttributedStmtAttributes(ArrayRef<const Attr *> Attrs);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// The context in which a C++ range-based for statement is being built
/// (see ActOnCXXForRangeStmt / BuildCXXForRangeStmt).
enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,
  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,
  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
  BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
/// Bitmask flags describing which entities are candidates for copy
/// elision / implicit move (see getCopyElisionCandidate and
/// isCopyElisionCandidate).
enum CopyElisionSemanticsKind {
  CES_Strict = 0,
  CES_AllowParameters = 1,
  CES_AllowDifferentTypes = 2,
  CES_AllowExceptionVariables = 4,
  CES_FormerDefault = (CES_AllowParameters),
  CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
  CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
                       CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
/// Push \p pool as the active pool of delayed diagnostics for the
/// declaration being parsed; returns the state needed by
/// PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// Enter a class-parsing context: bumps ParsingClassDepth and pushes an
/// undelayed-diagnostics state. Paired with PopParsingClass.
ParsingClassState PushParsingClass() {
  ParsingClassDepth++;
  return DelayedDiagnostics.pushUndelayed();
}
/// Leave a class-parsing context entered via PushParsingClass, restoring
/// the given diagnostics \p state.
void PopParsingClass(ParsingClassState state) {
  ParsingClassDepth--;
  DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
/// Tag type used to select the PushExpressionEvaluationContext overload
/// that reuses the current lambda context declaration.
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
/// The kind of capture requested for a variable (see tryCaptureVariable).
enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
///
/// Holds the parsed pieces of one declared iterator in an OpenMP
/// 'iterator(...)' modifier; a sequence of these is handed to
/// ActOnOMPIteratorExpr below.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr; // Name of the declared iterator, if one was parsed.
SourceLocation DeclIdentLoc;         // Location of that name.
ParsedType Type;                     // Declared iterator type.
OMPIteratorExpr::IteratorRange Range; // The begin:end[:step] range expressions.
SourceLocation AssignLoc;            // Location of '='.
SourceLocation ColonLoc;             // Location of the first ':'.
SourceLocation SecColonLoc;          // Location of the second ':' (before the step), if present.
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;          // Scope in which the member access was written.
UnqualifiedId &Id; // The member name as spelled in the source.
Decl *ObjCImpDecl; // Enclosing Objective-C implementation decl, if any.
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
//
// One OffsetOfComponent describes a single '.ident' or '[expr]' step of the
// designator chain; see BuildBuiltinOffsetOf / ActOnBuiltinOffsetOf below.
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd; // Source range covered by this component.
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo; // Active when !isBrackets: the designated field name.
Expr *E;                   // Active when isBrackets: the array index expression.
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check, i.e. the
/// Microsoft __if_exists / __if_not_exists extension (see the
/// CheckMicrosoftIfExistsSymbol overloads below).
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Describes why a comparison category type (e.g. std::strong_ordering)
/// was needed; passed to CheckComparisonCategoryType below.
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
// Canonical types already recorded — presumably used to deduplicate
// entries added to Exceptions (maintained by CalledDecl; its definition
// is not visible here — TODO confirm).
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
// The accumulated dynamic-exception (throw(...)) type list.
SmallVector<QualType, 4> Exceptions;
// Drop any dynamic-exception data collected so far.
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
/// Start from the most restrictive specification: noexcept in C++11 and
/// later, throw() (EST_DynamicNone) otherwise.
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
// Synthesize the 'false' literal for the noexcept(false) specification.
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
/// The previous CXXThisTypeOverride is saved on entry and put back when
/// the scope ends.
class CXXThisScopeRAII {
Sema &S;                         // Sema whose CXXThisTypeOverride is adjusted.
QualType OldCXXThisTypeOverride; // Saved value, restored by the destructor.
bool Enabled;                    // When false, the scope is a no-op.
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
  /// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
  /// \brief Check whether the given declaration's associated constraints are
  /// at least as constrained as another declaration's, according to the
  /// partial ordering of constraints.
///
  /// \param Result If no error occurred, receives true if D1 is at least as
  /// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
  /// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
  /// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
  /// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
/// Parser callback for a C++ class member declarator (data member or
/// member function), carrying any bit-field width, virt-specifiers
/// ('override'/'final'), and the style of in-class initializer, if any.
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
                                    Declarator &D,
                                    MultiTemplateParamsArg TemplateParameterLists,
                                    Expr *BitfieldWidth, const VirtSpecifiers &VS,
                                    InClassInitStyle InitStyle);

/// Bracketing callbacks around parsing a C++11 in-class non-static data
/// member initializer; the Finish callback attaches the parsed
/// initializer to the member declaration.
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
                                            SourceLocation EqualLoc,
                                            Expr *Init);

/// Handle a constructor mem-initializer written with parentheses,
/// e.g. 'Ctor() : MemberOrBase(Args...)'.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
                                  Scope *S,
                                  CXXScopeSpec &SS,
                                  IdentifierInfo *MemberOrBase,
                                  ParsedType TemplateTypeTy,
                                  const DeclSpec &DS,
                                  SourceLocation IdLoc,
                                  SourceLocation LParenLoc,
                                  ArrayRef<Expr *> Args,
                                  SourceLocation RParenLoc,
                                  SourceLocation EllipsisLoc);

/// Handle a constructor mem-initializer written with a braced
/// init-list, e.g. 'Ctor() : MemberOrBase{...}'.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
                                  Scope *S,
                                  CXXScopeSpec &SS,
                                  IdentifierInfo *MemberOrBase,
                                  ParsedType TemplateTypeTy,
                                  const DeclSpec &DS,
                                  SourceLocation IdLoc,
                                  Expr *InitList,
                                  SourceLocation EllipsisLoc);

/// Shared implementation for the two ActOnMemInitializer overloads:
/// resolves MemberOrBase and builds the corresponding member, base, or
/// delegating initializer.
MemInitResult BuildMemInitializer(Decl *ConstructorD,
                                  Scope *S,
                                  CXXScopeSpec &SS,
                                  IdentifierInfo *MemberOrBase,
                                  ParsedType TemplateTypeTy,
                                  const DeclSpec &DS,
                                  SourceLocation IdLoc,
                                  Expr *Init,
                                  SourceLocation EllipsisLoc);

/// Build a mem-initializer for a non-static data member.
MemInitResult BuildMemberInitializer(ValueDecl *Member,
                                     Expr *Init,
                                     SourceLocation IdLoc);

/// Build a mem-initializer for a (direct or virtual) base class.
MemInitResult BuildBaseInitializer(QualType BaseType,
                                   TypeSourceInfo *BaseTInfo,
                                   Expr *Init,
                                   CXXRecordDecl *ClassDecl,
                                   SourceLocation EllipsisLoc);

/// Build a C++11 delegating-constructor initializer.
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
                                         Expr *Init,
                                         CXXRecordDecl *ClassDecl);

/// Attach a delegating initializer to a constructor. Returns true on
/// error (reviewer note: boolean polarity inferred from sibling
/// Set*/Check* APIs — confirm against the definition).
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
                              CXXCtorInitializer *Initializer);

/// Set (and complete, with implicit initializations) the full set of
/// ctor-initializers for the given constructor.
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
                         ArrayRef<CXXCtorInitializer *> Initializers = None);

void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);

/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
                                            CXXRecordDecl *Record);

/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
    SourceLocation Location, CXXRecordDecl *ClassDecl,
    llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);

/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
                                    CXXDestructorDecl *Dtor);

/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;

/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;

/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;

/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();

/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
                    bool DefinitionRequired = false);

/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
                                           const CXXRecordDecl *RD);

/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
                                  bool ConstexprOnly = false);

/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();

/// Add the implicitly-declared special member functions (and, where
/// applicable, other implicit members) to the given class.
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

/// Parser callback for a constructor's full ctor-initializer list.
void ActOnMemInitializers(Decl *ConstructorDecl,
                          SourceLocation ColonLoc,
                          ArrayRef<CXXCtorInitializer*> MemInits,
                          bool AnyErrors);

/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);

void referenceDLLExportedClassMethods();

/// Propagate a DLL attribute from a class to a base-class template
/// specialization (see the dllimport/dllexport handling above).
void propagateDLLAttrToBaseClassTemplate(
    CXXRecordDecl *Class, Attr *ClassAttr,
    ClassTemplateSpecializationDecl *BaseTemplateSpec,
    SourceLocation BaseLoc);

/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);

/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);

/// Perform the checks that apply once a class definition is complete.
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);

void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
                                       Decl *TagDecl, SourceLocation LBrac,
                                       SourceLocation RBrac,
                                       const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();

void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);

/// Delayed parsing of member declarations and method bodies: these
/// callbacks bracket the re-entry into a class context when the parser
/// returns to members whose parsing was deferred.
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);

/// Record (or clear) the cached token stream for a late-parsed template.
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
                              CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();

/// Parse/build a static_assert declaration. The Build form carries the
/// evaluated 'Failed' result.
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr,
                                   Expr *AssertMessageExpr,
                                   SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr,
                                   StringLiteral *AssertMessageExpr,
                                   SourceLocation RParenLoc,
                                   bool Failed);

/// Friend declarations: type friends ('friend class X;') and friend
/// function declarations/definitions.
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
                                SourceLocation FriendLoc,
                                TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
                          MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
                                   MultiTemplateParamsArg TemplateParams);

/// Declarator checks for special member functions; each returns the
/// (possibly adjusted) type of the function and may adjust its storage
/// class.
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
                                    StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
                                   StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
                               StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
                                   StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);

/// Semantic checks for explicitly-defaulted functions, including
/// defaulted special members and defaulted comparison operators.
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);

bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
                                           CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();

bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
                                        DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
                                       FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
                               DefaultedComparisonKind DCK);

//===--------------------------------------------------------------------===//
// C++ Derived Classes
//

/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
                                     SourceRange SpecifierRange,
                                     bool Virtual, AccessSpecifier Access,
                                     TypeSourceInfo *TInfo,
                                     SourceLocation EllipsisLoc);

BaseResult ActOnBaseSpecifier(Decl *classdecl,
                              SourceRange SpecifierRange,
                              ParsedAttributes &Attrs,
                              bool Virtual, AccessSpecifier Access,
                              ParsedType basetype,
                              SourceLocation BaseLoc,
                              SourceLocation EllipsisLoc);

/// Attach the checked base specifiers to the class (AttachBaseSpecifiers
/// returns true on error).
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
                          MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
                         MutableArrayRef<CXXBaseSpecifier *> Bases);

/// Determine whether Derived is derived from Base; the second overload
/// also records the inheritance paths found.
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
                   CXXBasePaths &Paths);

// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);

/// Check a derived-to-base conversion, diagnosing inaccessible or
/// ambiguous bases; on success, optionally fills in the cast path.
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                  SourceLocation Loc, SourceRange Range,
                                  CXXCastPath *BasePath = nullptr,
                                  bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                  unsigned InaccessibleBaseID,
                                  unsigned AmbiguousBaseConvID,
                                  SourceLocation Loc, SourceRange Range,
                                  DeclarationName Name,
                                  CXXCastPath *BasePath,
                                  bool IgnoreAccess = false);

std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);

bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
                                       const CXXMethodDecl *Old);

/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
                                       const CXXMethodDecl *Old);

/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
                                          const CXXMethodDecl *Old);

bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);

/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);

/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);

/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
                                            const CXXMethodDecl *Old);

//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// The outcome of an access-control check.
enum AccessResult {
  AR_accessible,   ///< The access is permitted.
  AR_inaccessible, ///< The access is not permitted (diagnosed).
  AR_dependent,    ///< The result depends on template parameters.
  AR_delayed       ///< The check has been deferred (e.g. inside a decl).
};
/// Record the access specifier (public/protected/private) in effect for
/// a newly-declared class member.
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
                              NamedDecl *PrevMemberDecl,
                              AccessSpecifier LexicalAS);

/// Access checks for the various ways a declaration can be reached:
/// unresolved member/lookup expressions, allocation functions,
/// constructors, destructors, friends, plain member access, structured
/// bindings, member operators, address-of-member, and base classes.
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
                                   SourceRange PlacementRange,
                                   CXXRecordDecl *NamingClass,
                                   DeclAccessPair FoundDecl,
                                   bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
                                   CXXDestructorDecl *Dtor,
                                   const PartialDiagnostic &PDiag,
                                   QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
                               CXXRecordDecl *NamingClass,
                               DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
                                   CXXRecordDecl *DecomposedClass,
                                   DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
                                       Expr *ObjectExpr,
                                       Expr *ArgExpr,
                                       DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
                                        DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
                                  QualType Base, QualType Derived,
                                  const CXXBasePath &Path,
                                  unsigned DiagID,
                                  bool ForceCheck = false,
                                  bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
                        QualType BaseType);

/// Determine whether a member is accessible for the purpose of a
/// (possibly deleted) member deletion check; the Diag form emits the
/// given diagnostic on failure.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                   DeclAccessPair Found, QualType ObjectType,
                                   SourceLocation Loc,
                                   const PartialDiagnostic &Diag);
/// Convenience overload: same as the five-argument form, with an
/// invalid location and a default (suppressed) diagnostic.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                   DeclAccessPair Found,
                                   QualType ObjectType) {
  return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
                                       SourceLocation(), PDiag());
}
/// Re-run an access check that was deferred because it was dependent,
/// now that template arguments are available.
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
                                const MultiLevelTemplateArgumentList &TemplateArgs);
/// Emit all dependent diagnostics recorded for a pattern, substituting
/// the given template arguments.
void PerformDependentDiagnostics(const DeclContext *Pattern,
                                 const MultiLevelTemplateArgumentList &TemplateArgs);

void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Selects the %select variant used when diagnosing a use of an
/// abstract class type; the values (other than AbstractNone) index the
/// alternatives in the corresponding diagnostic text.
enum AbstractDiagSelID {
  AbstractNone = -1,
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};
/// Query/require that a type is not abstract; RequireNonAbstractType
/// diagnoses via the given diagnoser and returns true on error.
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);
/// Convenience overload of RequireNonAbstractType: packages the
/// diagnostic ID and its arguments into a BoundTypeDiagnoser and
/// forwards to the TypeDiagnoser-based overload above.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> BoundDiagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, BoundDiagnoser);
}
/// Note the reason(s) a class is abstract (its pure virtual functions).
void DiagnoseAbstractType(const CXXRecordDecl *RD);

//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//

/// Check the well-formedness of an overloaded operator / literal
/// operator declaration; returns true on error.
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);

bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
/// Remove from a lookup result any declarations that cannot be
/// interpreted as template names (see getAsTemplateNameDecl).
void FilterAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true,
                                   bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
///        considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
///        name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
                                 bool AllowFunctionTemplates = true,
                                 bool AllowDependent = true);

/// Tag type used to construct a RequiredTemplateKind that is
/// unconditionally required (see below).
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
/// How (if at all) a name was assumed to be a template name even though
/// lookup did not find a template.
enum class AssumedTemplateKind {
  /// This is not assumed to be a template name.
  None,
  /// This is assumed to be a template name because lookup found nothing.
  FoundNothing,
  /// This is assumed to be a template name because lookup found one or more
  /// functions (but no function templates).
  FoundFunctions,
};
/// Perform name lookup for a template-name, honoring the
/// RequiredTemplateKind and optionally reporting how the name was
/// assumed to be a template; returns true on error.
bool LookupTemplateName(
    LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
    bool EnteringContext, bool &MemberOfUnknownSpecialization,
    RequiredTemplateKind RequiredTemplate = SourceLocation(),
    AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);

/// Parser query: determine whether the given name is a template-name,
/// returning the template and its kind if so.
TemplateNameKind isTemplateName(Scope *S,
                                CXXScopeSpec &SS,
                                bool hasTemplateKeyword,
                                const UnqualifiedId &Name,
                                ParsedType ObjectType,
                                bool EnteringContext,
                                TemplateTy &Template,
                                bool &MemberOfUnknownSpecialization,
                                bool Disambiguation = false);

/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
                                     TemplateNameKind &TNK,
                                     SourceLocation NameLoc,
                                     IdentifierInfo *&II);

bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
                                      SourceLocation NameLoc,
                                      bool Diagnose = true);

/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
                          SourceLocation NameLoc,
                          ParsedTemplateTy *Template = nullptr);

/// Diagnose a name that was expected to be a template but is not,
/// optionally suggesting a typo-corrected template.
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
                                 SourceLocation IILoc,
                                 Scope *S,
                                 const CXXScopeSpec *SS,
                                 TemplateTy &SuggestedTemplate,
                                 TemplateNameKind &SuggestedKind);

/// Diagnose an attempt to instantiate a template whose pattern is not
/// usable (e.g. only declared, not defined); returns true if a
/// diagnostic was emitted.
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
                                    NamedDecl *Instantiation,
                                    bool InstantiatedFromMember,
                                    const NamedDecl *Pattern,
                                    const NamedDecl *PatternDef,
                                    TemplateSpecializationKind TSK,
                                    bool Complain = true);

void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);

/// Parser callbacks for the three kinds of template parameters:
/// type, non-type, and template template parameters.
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
                              SourceLocation EllipsisLoc,
                              SourceLocation KeyLoc,
                              IdentifierInfo *ParamName,
                              SourceLocation ParamNameLoc,
                              unsigned Depth, unsigned Position,
                              SourceLocation EqualLoc,
                              ParsedType DefaultArg, bool HasTypeConstraint);

/// Attach a C++20 type-constraint (concept) to a template parameter;
/// each form returns true on error.
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
                         TemplateIdAnnotation *TypeConstraint,
                         TemplateTypeParmDecl *ConstrainedParameter,
                         SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
                          DeclarationNameInfo NameInfo,
                          ConceptDecl *NamedConcept,
                          const TemplateArgumentListInfo *TemplateArgs,
                          TemplateTypeParmDecl *ConstrainedParameter,
                          SourceLocation EllipsisLoc);

bool AttachTypeConstraint(AutoTypeLoc TL,
                          NonTypeTemplateParmDecl *ConstrainedParameter,
                          SourceLocation EllipsisLoc);

QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
                                           SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);

NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
                                         unsigned Depth,
                                         unsigned Position,
                                         SourceLocation EqualLoc,
                                         Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
                                          SourceLocation TmpLoc,
                                          TemplateParameterList *Params,
                                          SourceLocation EllipsisLoc,
                                          IdentifierInfo *ParamName,
                                          SourceLocation ParamNameLoc,
                                          unsigned Depth,
                                          unsigned Position,
                                          SourceLocation EqualLoc,
                                          ParsedTemplateArgument DefaultArg);

/// Build a TemplateParameterList from the parsed parameters and
/// optional requires-clause.
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
                           SourceLocation ExportLoc,
                           SourceLocation TemplateLoc,
                           SourceLocation LAngleLoc,
                           ArrayRef<NamedDecl *> Params,
                           SourceLocation RAngleLoc,
                           Expr *RequiresClause);
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
/// Check a template parameter list against an optional prior
/// declaration's list, in the given context; returns true on error.
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
                                TemplateParameterList *OldParams,
                                TemplateParamListContext TPC,
                                SkipBodyInfo *SkipBody = nullptr);

/// Match the template parameter lists of a declaration against its
/// scope specifier, determining which list (if any) applies to the
/// declaration itself.
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
    SourceLocation DeclStartLoc, SourceLocation DeclLoc,
    const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
    ArrayRef<TemplateParameterList *> ParamLists,
    bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
    bool SuppressDiagnostic = false);

/// Check and act on a class template declaration or redeclaration.
DeclResult CheckClassTemplate(
    Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
    CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
    const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
    AccessSpecifier AS, SourceLocation ModulePrivateLoc,
    SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
    TemplateParameterList **OuterTemplateParamLists,
    SkipBodyInfo *SkipBody = nullptr);

TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
                                                  QualType NTTPType,
                                                  SourceLocation Loc);

/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
                                                   SourceLocation Location);

void translateTemplateArguments(const ASTTemplateArgsPtr &In,
                                TemplateArgumentListInfo &Out);

ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);

void NoteAllFoundTemplates(TemplateName Name);

/// Form the type of a template-id such as 'vector<int>'.
QualType CheckTemplateIdType(TemplateName Template,
                             SourceLocation TemplateLoc,
                             TemplateArgumentListInfo &TemplateArgs);

TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
                    TemplateTy Template, IdentifierInfo *TemplateII,
                    SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
                    ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
                    bool IsCtorOrDtorName = false, bool IsClassName = false);

/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
                                  TypeSpecifierType TagSpec,
                                  SourceLocation TagLoc,
                                  CXXScopeSpec &SS,
                                  SourceLocation TemplateKWLoc,
                                  TemplateTy TemplateD,
                                  SourceLocation TemplateLoc,
                                  SourceLocation LAngleLoc,
                                  ASTTemplateArgsPtr TemplateArgsIn,
                                  SourceLocation RAngleLoc);

/// Variable templates: specializations and template-id references.
DeclResult ActOnVarTemplateSpecialization(
    Scope *S, Declarator &D, TypeSourceInfo *DI,
    SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
    StorageClass SC, bool IsPartialSpecialization);

DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
                              SourceLocation TemplateLoc,
                              SourceLocation TemplateNameLoc,
                              const TemplateArgumentListInfo &TemplateArgs);

ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
                              const DeclarationNameInfo &NameInfo,
                              VarTemplateDecl *Template,
                              SourceLocation TemplateLoc,
                              const TemplateArgumentListInfo *TemplateArgs);

/// Build a reference to a concept specialization, e.g.
/// 'Concept<Args...>'.
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
                       SourceLocation TemplateKWLoc,
                       const DeclarationNameInfo &ConceptNameInfo,
                       NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
                       const TemplateArgumentListInfo *TemplateArgs);

void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);

/// Build an expression referring to a template-id, e.g. 'f<int>'.
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
                               SourceLocation TemplateKWLoc,
                               LookupResult &R,
                               bool RequiresADL,
                               const TemplateArgumentListInfo *TemplateArgs);

ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
                                        SourceLocation TemplateKWLoc,
                                        const DeclarationNameInfo &NameInfo,
                                        const TemplateArgumentListInfo *TemplateArgs);

TemplateNameKind ActOnTemplateName(
    Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
    TemplateTy &Template, bool AllowInjectedClassName = false);

/// Act on an explicit (full or partial) class template specialization.
DeclResult ActOnClassTemplateSpecialization(
    Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
    SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
    TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
    MultiTemplateParamsArg TemplateParameterLists,
    SkipBodyInfo *SkipBody = nullptr);

bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
                                            TemplateDecl *PrimaryTemplate,
                                            unsigned NumExplicitArgs,
                                            ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
    ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
    VarTemplatePartialSpecializationDecl *Partial);

Decl *ActOnTemplateDeclarator(Scope *S,
                              MultiTemplateParamsArg TemplateParameterLists,
                              Declarator &D);

/// Diagnose invalid combinations of a redeclaration's specialization/
/// instantiation kind with a prior declaration's; SuppressNew is set
/// when the new declaration should be dropped.
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
                                       TemplateSpecializationKind NewTSK,
                                       NamedDecl *PrevDecl,
                                       TemplateSpecializationKind PrevTSK,
                                       SourceLocation PrevPtOfInstantiation,
                                       bool &SuppressNew);

bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
                  const TemplateArgumentListInfo &ExplicitTemplateArgs,
                                                  LookupResult &Previous);

bool CheckFunctionTemplateSpecialization(
    FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
    LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);

/// Explicit instantiation of a class template specialization, a class,
/// or a function/variable (three parser entry points).
DeclResult ActOnExplicitInstantiation(
    Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
    unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
    TemplateTy Template, SourceLocation TemplateNameLoc,
    SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
    SourceLocation RAngleLoc, const ParsedAttributesView &Attr);

DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      unsigned TagSpec, SourceLocation KWLoc,
                                      CXXScopeSpec &SS, IdentifierInfo *Name,
                                      SourceLocation NameLoc,
                                      const ParsedAttributesView &Attr);

DeclResult ActOnExplicitInstantiation(Scope *S,
                                      SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      Declarator &D);

/// Substitute a default template argument for the given parameter, if
/// one is available; HasDefaultArg reports whether a default existed.
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
                                        SourceLocation TemplateLoc,
                                        SourceLocation RAngleLoc,
                                        Decl *Param,
                                        SmallVectorImpl<TemplateArgument>
                                          &Converted,
                                        bool &HasDefaultArg);
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
/// Check a single template argument against the given parameter,
/// appending the converted form to \p Converted; returns true on error.
bool CheckTemplateArgument(NamedDecl *Param,
                           TemplateArgumentLoc &Arg,
                           NamedDecl *Template,
                           SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc,
                           unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
                               SourceLocation TemplateLoc,
                               TemplateArgumentListInfo &TemplateArgs,
                               bool PartialTemplateArgs,
                               SmallVectorImpl<TemplateArgument> &Converted,
                               bool UpdateArgsWithConversions = true,
                               bool *ConstraintsNotSatisfied = nullptr);

bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                               TemplateArgumentLoc &Arg,
                               SmallVectorImpl<TemplateArgument> &Converted);

/// Per-parameter-kind argument checks: type arguments, non-type
/// arguments (returning the converted expression), and template
/// template arguments.
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
                           TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                 QualType InstantiatedParamType, Expr *Arg,
                                 TemplateArgument &Converted,
                                 CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
                                   TemplateParameterList *Params,
                                   TemplateArgumentLoc &Arg);

/// Rebuild an expression from a converted declaration/integral
/// template argument (e.g. when instantiating).
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                        QualType ParamType,
                                        SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                            SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
  /// We are matching the template parameter lists of two templates
  /// that might be redeclarations.
  ///
  /// \code
  /// template<typename T> struct X;
  /// template<typename T> struct X;
  /// \endcode
  TPL_TemplateMatch,

  /// We are matching the template parameter lists of two template
  /// template parameters as part of matching the template parameter lists
  /// of two templates that might be redeclarations.
  ///
  /// \code
  /// template<template<int I> class TT> struct X;
  /// template<template<int Value> class Other> struct X;
  /// \endcode
  TPL_TemplateTemplateParmMatch,

  /// We are matching the template parameter lists of a template
  /// template argument against the template parameter lists of a template
  /// template parameter.
  ///
  /// \code
  /// template<template<int Value> class Metafun> struct X;
  /// template<int Value> struct integer_c;
  /// X<integer_c> xic;
  /// \endcode
  TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
/// Handle the definition of a concept named \p Name with the constraint
/// expression \p ConstraintExpr.
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
/// Begin a requires-expression, introducing its local parameters into
/// \p BodyScope.
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
/// Finish the body of a requires-expression.
void ActOnFinishRequiresExpr();
/// Handle a simple requirement (an arbitrary expression \p E).
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
/// Handle a type requirement ('typename' followed by a qualified name or
/// template-id).
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
/// Handle a compound requirement without a return-type-requirement.
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
/// Handle a compound requirement whose return-type-requirement is the
/// type constraint \p TypeConstraint.
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
/// Handle a nested requirement with constraint expression \p Constraint.
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
/// Build an expression requirement from the expression \p E.
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
/// Build an expression requirement whose expression could not be
/// substituted, carrying the resulting substitution diagnostic.
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
/// Build a type requirement from the type \p Type.
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
/// Build a type requirement whose type could not be substituted,
/// carrying the resulting substitution diagnostic.
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
/// Build a nested requirement from the constraint expression \p E.
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
/// Build a nested requirement whose constraint could not be substituted,
/// carrying the resulting substitution diagnostic.
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
/// Finish a requires-expression, combining its body declaration, local
/// parameters, and collected requirements.
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
///
/// \param NumExpansions The number of expansions this pack expansion will
/// produce, if known.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
///
/// \param NumExpansions The number of expansions this pack expansion will
/// produce, if known.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
///
/// \param NumExpansions The number of expansions this pack expansion will
/// produce, if known.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
/// Perform template argument deduction to determine whether the given
/// class template partial specialization matches the template arguments
/// \p TemplateArgs, recording the results in \p Info.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Variable-template counterpart of the partial-specialization deduction
/// overload above.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Substitute the explicitly-specified template arguments
/// \p ExplicitTemplateArgs into \p FunctionTemplate, producing the
/// partially-deduced arguments in \p Deduced, the substituted parameter
/// types in \p ParamTypes and, if \p FunctionType is non-null, the
/// substituted function type.
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
// The parameter type from which deduction was performed.
QualType OriginalParamType;
// Whether the parameter was decomposed for deduction purposes.
bool DecomposedParam;
// The index of the argument within the call.
unsigned ArgIdx;
// The type of the corresponding call argument.
QualType OriginalArgType;
};
/// Complete function template argument deduction for
/// \p FunctionTemplate, given the partially-deduced arguments \p Deduced
/// (of which the first \p NumExplicitlySpecified were explicitly
/// specified), producing the resulting declaration in \p Specialization.
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
/// Deduce the template arguments of \p FunctionTemplate from the call
/// arguments \p Args.
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
/// Deduce the template arguments of \p FunctionTemplate against the
/// target function type \p ArgFunctionType (e.g., when taking the address
/// of a function template).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Deduce the template arguments of a conversion function template
/// against the desired conversion type \p ToType.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
/// Deduce the template arguments of \p FunctionTemplate when there is no
/// function type against which to deduce.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
/// Deduction succeeded.
DAR_Succeeded,
/// Deduction failed.
DAR_Failed,
/// Deduction failed and a diagnostic has already been emitted.
DAR_FailedAlreadyDiagnosed
};
/// Deduce the type of the 'auto' in \p AutoType from the initializer
/// \p Initializer, producing the deduced type in \p Result.
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
/// Overload of the above taking the 'auto' type as a TypeLoc.
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
/// Diagnose a failure to deduce the type of the variable \p VDecl from
/// its initializer \p Init.
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
/// Deduce the deferred return type of the function \p FD.
///
/// \returns true if an error occurred, false otherwise.
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
/// Deduce the class template specialization named by \p TInfo from the
/// initializer \p Init (class template argument deduction).
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
/// Deduce the type of the variable \p VDecl from its initializer
/// \p Init.
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
/// Retrieve the location of the return type of \p FD.
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
/// Deduce the function type of \p FD from the return expression
/// \p RetExpr, whose declared return type involves the 'auto' type \p AT.
///
/// \returns true if an error occurred, false otherwise.
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
/// Determine which of the two function templates \p FT1 and \p FT2 is
/// more specialized according to the partial ordering rules, or null if
/// neither is more specialized.
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
/// Determine the most specialized declaration in the range
/// [\p SBegin, \p SEnd), emitting the given diagnostics on failure.
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
/// Determine which of the two class template partial specializations
/// \p PS1 and \p PS2 is more specialized, or null if neither is.
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
/// Determine whether the class template partial specialization \p T is
/// more specialized than its primary template.
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
/// Variable-template counterpart of the partial-ordering overload above.
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
/// Determine whether the variable template partial specialization \p T is
/// more specialized than its primary template.
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
/// Determine whether the template-template parameter with parameter list
/// \p PParam is at least as specialized as the template \p AArg.
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
/// Mark in \p Used the template parameters at depth \p Depth that appear
/// in the expression \p E (or, if \p OnlyDeduced, only those in deduced
/// contexts).
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
/// Mark in \p Used the template parameters at depth \p Depth that appear
/// in \p TemplateArgs (or, if \p OnlyDeduced, only those in deduced
/// contexts).
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// Mark in \p Deduced the template parameters that can be deduced from
/// the type of \p FunctionTemplate, using this semantic analysis object's
/// ASTContext.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  // Forward to the static overload, supplying our ASTContext.
  MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
/// Static variant of MarkDeducedTemplateParameters that operates on an
/// explicitly-provided ASTContext \p Ctx.
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
/// Retrieve the template argument lists to use when instantiating the
/// declaration \p D, optionally prepending the innermost argument list
/// \p Innermost.
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
/// Retrieve the template arguments as an array. Only valid for context
/// kinds that carry template arguments (asserts otherwise).
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self) {
    // Remember the index currently in effect so the destructor can
    // restore it, then install the new one.
    OldSubstitutionIndex = Self.ArgumentPackSubstitutionIndex;
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    // Restore the index that was active before this object was created.
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  /// Tag type selecting the exception-specification overload below.
  struct ExceptionSpecification {};

  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraints-check overloads below.
  struct ConstraintsCheck {};

  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, NamedDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraint-substitution overload below.
  struct ConstraintSubstitution {};

  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, NamedDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraint-normalization overload below.
  struct ConstraintNormalization {};

  /// \brief Note that we are normalizing a constraint expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// Tag type selecting the parameter-mapping overload below.
  struct ParameterMappingSubstitution {};

  /// \brief Note that we are subtituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we have finished instantiating this template.
  void Clear();

  // Popping happens in Clear(); the destructor simply delegates.
  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool AlreadyInstantiating;

  // NOTE(review): presumably diagnoses and returns true when the
  // instantiation depth limit is exceeded -- confirm against the
  // out-of-line definition.
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  // Shared constructor taking an explicit SynthesisKind; the public
  // constructors above are expected to funnel through it -- TODO confirm.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  // Stack-only RAII record: copying would corrupt the context stack.
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  // Any context beyond the non-instantiation entries is a real instantiation.
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
/// Print the active code-synthesis context stack, but only if it has
/// changed since the last time it was emitted; then note any active
/// pragma-attribute instantiation point.
void PrintContextStack() {
  const auto CurDepth = CodeSynthesisContexts.size();
  if (CurDepth != 0 && CurDepth != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    // Remember the depth so repeated diagnostics in the same context
    // do not re-print an identical stack.
    LastEmittedCodeSynthesisContextDepth = CurDepth;
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  // Only the innermost expression evaluation context is relevant.
  return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  // State captured at construction and restored verbatim on destruction.
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;
public:
  /// \param AccessCheckingSFINAE Value installed into
  /// \c Sema::AccessCheckingSFINAE for the lifetime of the trap.
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored())
  {
    // Outside any SFINAE context, entering the trap marks a
    // non-instantiation SFINAE region (see
    // InNonInstantiationSFINAEContext).
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore every piece of captured state, discarding anything that
    // happened while the trap was active.
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext
      = PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  // Saved so the destructor can restore the previous setting.
  bool PrevDisableTypoCorrection;
public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    // Suppress typo correction for the duration of the scope.
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII scope that, when enabled, defers global eager instantiation:
/// the pending-instantiation and vtable-use queues are stashed on entry
/// and restored (after checking both were drained) on exit.
class GlobalEagerInstantiationScope {
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;

public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled)
      return;

    // Move both global work queues aside so work queued inside this
    // scope is tracked separately.
    SavedVTableUses.swap(S.VTableUses);
    SavedPendingInstantiations.swap(S.PendingInstantiations);
  }

  /// Perform the deferred work: define used vtables and run the pending
  /// instantiations.
  void perform() {
    if (!Enabled)
      return;

    S.DefineUsedVTables();
    S.PerformPendingInstantiations();
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled)
      return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
/// RAII scope that isolates the queue of local implicit instantiations:
/// the enclosing queue is stashed on entry and put back on exit.
class LocalEagerInstantiationScope {
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;

public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    // Move the enclosing scope's queue aside; work queued inside this
    // scope is tracked separately.
    S.PendingLocalImplicitInstantiations.swap(
        SavedPendingLocalImplicitInstantiations);
  }

  /// Drain this scope's queue by running the pending local
  /// instantiations.
  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    // Reinstate the enclosing scope's queue.
    S.PendingLocalImplicitInstantiations.swap(
        SavedPendingLocalImplicitInstantiations);
  }
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  // True once any entry differs from the default-constructed value.
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index,
  ///
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    // Entries must be set in strictly increasing index order; any gap
    // is filled with default-constructed infos by the resize.
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    HasInteresting |= (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (HasInteresting) {
      // Pad out to the full parameter count before handing the array over.
      Infos.resize(numParams);
      return Infos.data();
    }
    return nullptr;
  }
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// Bundles an attribute from a template pattern together with the
/// instantiation scope and target declaration needed to instantiate it
/// later (see InstantiateAttrs' LateAttrs parameter).
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;           // Attribute from the template pattern.
  LocalInstantiationScope *Scope; // Scope captured for the instantiation.
  Decl *NewDecl;                  // Declaration the attribute attaches to.

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
      : TmplAttr(A), Scope(S), NewDecl(D) {}
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// Kinds of Objective-C container context; returned by
/// getObjCContainerKind().
enum ObjCContainerKind {
  OCK_None = -1, // -1 presumably signals "no container" -- per the name.
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build a an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Classification of Objective-C methods with special semantics
/// (alloc/new/copy/init families, going by the enumerator names --
/// NOTE(review): confirm against the code that produces these values).
enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};
/// Per-argument information for an Objective-C method declaration, as
/// passed (one entry per selector argument) to ActOnMethodDeclaration.
struct ObjCArgInfo {
  IdentifierInfo *Name;   // The argument's name.
  SourceLocation NameLoc; // Location of the argument name.
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;
  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
///
/// Produced by getObjCMessageKind when disambiguating the receiver.
enum ObjCMessageKind {
  /// The message is sent to 'super'.
  ObjCSuperMessage,
  /// The message is an instance message.
  ObjCInstanceMessage,
  /// The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
/// Consumed by CheckObjCMethodOverrides below.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,   ///< The result type is compatible with the method.
  RTC_Incompatible, ///< The result type is not compatible.
  RTC_Unknown       ///< Compatibility could not be determined.
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
/// The alignment mode requested by '#pragma options align', passed to
/// ActOnPragmaOptionsAlign.
enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// Distinguishes the two situations DiagnoseNonDefaultPragmaPack reports
/// about '#pragma pack' state around a file boundary.
enum class PragmaPackDiagnoseKind {
  /// A non-default pack state was active when an include was entered.
  NonDefaultStateAtInclude,
  /// The pack state changed between entering and exiting a file.
  ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
/// Identifies which MS segment pragma a section belongs to; see
/// ActOnPragmaMSSeg for the corresponding
/// #pragma bss_seg/data_seg/const_seg/code_seg handling.
enum PragmaSectionKind {
  PSK_DataSeg,  // #pragma data_seg
  PSK_BSSSeg,   // #pragma bss_seg
  PSK_ConstSeg, // #pragma const_seg
  PSK_CodeSeg,  // #pragma code_seg
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
  // Precise semantics are in effect only when every fast-math style
  // relaxation is disabled in the current FP feature state.
  bool AnyRelaxation = CurFPFeatures.allowAssociativeMath() ||
                       CurFPFeatures.noSignedZeros() ||
                       CurFPFeatures.allowReciprocalMath() ||
                       CurFPFeatures.allowApproximateFunctions();
  return !AnyRelaxation;
}
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
/// \return the stored OptimizeOffPragmaLocation (set by ActOnPragmaOptimize).
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
template <typename AttrType>
bool checkRangedIntegralArgument(Expr *E, const AttrType *TmpAttr,
ExprResult &Result);
template <typename AttrType>
void AddOneConstantValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
template <typename AttrType>
void AddOneConstantPowerTwoValueAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGABankBitsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
/// The ownership convention (NS, CF, or OS) targeted by an
/// 'XConsumed'-style attribute; consumed by AddXConsumedAttr below.
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addSYCLIntelPipeIOAttr - Adds a pipe I/O attribute to a particular
/// declaration.
void addSYCLIntelPipeIOAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ID);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
// Adds an intel_reqd_sub_group_size attribute to a particular declaration.
void addIntelReqdSubGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
/// Return the name of the OpenCL extension currently in effect, as last
/// recorded by setCurrentOpenCLExtension (empty if none has been set).
llvm::StringRef getCurrentOpenCLExtension() const {
  return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
/// Record \p Ext as the OpenCL extension currently in effect; readable
/// afterwards via getCurrentOpenCLExtension.
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
  CurrOpenCLExtension = Ext.str();
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
  /// The associated OpenMP context selector.
  OMPTraitInfo *TI;
  /// The associated OpenMP context selector mangling.
  std::string NameSuffix;
  /// Construct a scope for the context selector \p TI.
  OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope.
FunctionDecl *
ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
Declarator &D);
/// Register \p FD as specialization of \p BaseFD in the current `omp
/// begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
FunctionDecl *FD, FunctionDecl *BaseFD);
public:
/// Can we exit a scope at the moment.
///
/// Returns true while inside at least one `omp begin/end declare variant`
/// scope, i.e. while OMPDeclareVariantScopes is non-empty.
bool isInOpenMPDeclareVariantScope() {
  return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for that
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true inside OpenMP declare target region.
///
/// Tracked via DeclareTargetNestingLevel, which counts nested
/// '#pragma omp declare target' directives currently open.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on a well-formed OpenMP clause that carries a single expression
/// argument (e.g. 'if', 'final', 'num_threads'); \p Kind identifies the
/// concrete clause.
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a well-formed OpenMP clause that carries a single simple
/// (enumeration-like) argument, e.g. 'default' or 'proc_bind'; \p Kind
/// identifies the concrete clause and \p Argument is the raw argument value.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a well-formed OpenMP clause that carries one expression plus
/// additional simple arguments (e.g. 'schedule'); \p Kind identifies the
/// concrete clause.
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on a well-formed OpenMP clause that takes no argument, e.g.
/// 'nowait' or 'untied'; \p Kind identifies the concrete clause.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on a well-formed OpenMP clause that carries a variable list
/// (e.g. 'private', 'shared', 'map'); \p Kind identifies the concrete
/// clause, the remaining parameters carry the clause-specific extras
/// (reduction/mapper id, map-type modifiers, etc.).
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// A conversion the compiler performs implicitly.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// An explicit cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

/// Return true when \p CCK denotes one of the explicit cast kinds
/// (C-style, functional, or other), as opposed to an implicit conversion
/// or a builtin-operator operand conversion.
static bool isCast(CheckedConversionKind CCK) {
  switch (CCK) {
  case CCK_CStyleCast:
  case CCK_FunctionalCast:
  case CCK_OtherCast:
    return true;
  default:
    return false;
  }
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,    // Call to a variadic function.
  VariadicBlock,       // Call through a variadic block.
  VariadicMethod,      // Variadic method call (e.g. an Objective-C message).
  VariadicConstructor, // Call to a variadic constructor.
  VariadicDoesNotApply // The callee is not variadic.
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,         // Valid to pass as a variadic argument.
  VAK_ValidInCXX11,  // Valid only under C++11 rules.
  VAK_Undefined,     // Passing this has undefined behavior.
  VAK_MSVCUndefined, // Undefined behavior (MSVC-compatibility variant).
  VAK_Invalid        // Never valid; an error.
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointers types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// Convenience overload of FindCompositePointerType that operates on
/// ExprResults: unwraps them, delegates to the Expr*& overload (which may
/// rewrite the expressions), and stores the possibly-updated expressions
/// back into the caller's results.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Composite = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
  E1 = LHS;
  E2 = RHS;
  return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
/// Result of checking an ARC retainable-pointer conversion (see
/// CheckObjCConversion): the conversion is fine, produced an "unbridged"
/// cast (handled by stripARCUnbridgedCast / diagnoseARCUnbridgedCast), or
/// is an error.
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The decl and expression of a parsed condition (as used by 'if', 'while',
/// 'for', and 'switch' -- see ConditionKind below), together with validity
/// and, for constexpr conditions, the statically-known boolean value.
class ConditionResult {
  Decl *ConditionVar;    // Condition variable, if one was declared; may be null.
  FullExprArg Condition; // The condition expression itself.
  bool Invalid;          // True if this represents an invalid condition.
  bool HasKnownValue;    // True if the value was computed at parse time.
  bool KnownValue;       // The computed value; meaningful only when
                         // HasKnownValue is true.
  friend class Sema;
  // NOTE: member-initializer order matters here -- KnownValue's initializer
  // reads HasKnownValue, which is declared (and therefore initialized)
  // before it.
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}
  bool isInvalid() const { return Invalid; }
  /// Returns the condition variable (null if none was declared) and the
  /// condition expression.
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  /// Returns the statically-known boolean value of the condition, or None
  /// if it was not computed at parse time.
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
/// Produce a ConditionResult representing an invalid condition.
static ConditionResult ConditionError() {
  return ConditionResult(/*Invalid=*/true);
}
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  /// When true, diagnostics are suppressed entirely.
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) {}

  /// Diagnose that the expression is not an integer constant expression.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc,
                              SourceRange SR) = 0;
  /// Diagnose an expression that required folding (see the AllowFold
  /// parameter of VerifyIntegerConstantExpression).
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);

  virtual ~VerifyICEDiagnoser() = default;
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };
  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                    FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();
  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }
  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                             const T &Value) {
    // Route the streamed value either to the immediate diagnostic or to the
    // partial diagnostic deferred on Diag.Fn in S.DeviceDeferredDiags; when
    // neither Optional is set (K_Nop), the value is simply dropped.
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

private:
  Sema &S;            // Sema instance that owns DeviceDeferredDiags.
  SourceLocation Loc; // Location the diagnostic points at.
  unsigned DiagID;    // Diagnostic ID to emit.
  FunctionDecl *Fn;   // Function a deferred diagnostic is attached to.
  bool ShowCallStack; // Whether to also emit a call stack with the diag.
  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
///  if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
/// Check if the expression is allowed to be used in expressions for the
/// offloading devices.
void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc);
/// Classification of a function by CUDA compilation target, as computed by
/// IdentifyCUDATarget below.
enum CUDAFunctionTarget {
  CFT_Device,       // __device__ function.
  CFT_Global,       // __global__ kernel function.
  CFT_Host,         // __host__ function (also returned for a null decl).
  CFT_HostDevice,   // __host__ __device__ function.
  CFT_InvalidTarget // Invalid combination of target attributes.
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context (CFT_Host when the current
/// context is not a function, since IdentifyCUDATarget returns CFT_Host for
/// a null decl).
CUDAFunctionTarget CurrentCUDATarget() {
  auto *ContextFn = dyn_cast<FunctionDecl>(CurContext);
  return IdentifyCUDATarget(ContextFn);
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
// Completion entry points for C/C++ declarations, expressions, and
// statements. Each is invoked by the parser at the corresponding
// grammatical position.
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
// Completion entry points for Objective-C constructs, invoked by the
// parser at the corresponding positions in Objective-C grammar.
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
// Completion entry points for preprocessor directives and macro
// expansion contexts.
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
// Call checking: dispatched from call-expression semantic analysis to
// validate arguments against the callee's prototype and attributes.
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
// Builtin function checking: the generic entry point plus per-target
// checkers for target-specific builtins.
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckIntelFPGARegBuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call);
bool CheckIntelFPGAMemBuiltinFunctionCall(CallExpr *Call);
// Checkers for builtins that need custom semantic analysis (varargs,
// classification, OS log formatting, ...).
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
// Helpers for validating constant-expression builtin arguments: value,
// range, multiple-of, power-of-two, and shifted-byte constraints.
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
public:
/// The kind of format string recognized by format-string checking,
/// as named in the format attribute (printf, scanf, strftime, ...).
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
/// Diagnose comparison of floating-point values with == or != (which is
/// usually a mistake due to rounding).
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
/// Type information associated with a registered type-tag magic value.
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
// Lazily-initialized identifier caches (see getSuperIdentifier /
// getFloat128Identifier).
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
/// RAII object that stashes the pending delayed exception-spec checks on
/// construction and restores them on destruction, asserting that the
/// intervening parse did not leave any checks pending.
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
// Exchanges Sema's pending-check lists with the locally saved copies;
// called symmetrically from the constructor and destructor.
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
private:
// We store SYCL Kernels here and handle separately -- which is a hack.
// FIXME: It would be best to refactor this.
llvm::SetVector<Decl *> SyclDeviceDecls;
// SYCL integration header instance for current compilation unit this Sema
// is associated with.
std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader;
// Used to suppress diagnostics during kernel construction, since these were
// already emitted earlier. Diagnosing during Kernel emissions also skips the
// useful notes that shows where the kernel was called.
bool ConstructingOpenCLKernel = false;
public:
void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.insert(d); }
llvm::SetVector<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; }
/// Lazily creates and returns SYCL integration header instance.
SYCLIntegrationHeader &getSyclIntegrationHeader() {
if (SyclIntHeader == nullptr)
SyclIntHeader = std::make_unique<SYCLIntegrationHeader>(
getDiagnostics(), getLangOpts().SYCLUnnamedLambda, *this);
return *SyclIntHeader.get();
}
/// Kinds of constructs restricted in SYCL device code, used to select the
/// diagnostic emitted when one is encountered.
enum SYCLRestrictKind {
KernelGlobalVariable,
KernelRTTI,
KernelNonConstStaticDataVariable,
KernelCallVirtualFunction,
KernelUseExceptions,
KernelCallRecursiveFunction,
KernelCallFunctionPointer,
KernelAllocateStorage,
KernelUseAssembly,
KernelCallDllimportFunction,
KernelCallVariadicFunction,
KernelCallUndefinedFunction
};
bool isKnownGoodSYCLDecl(const Decl *D);
void checkSYCLDeviceVarDecl(VarDecl *Var);
void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC);
void MarkDevice();
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for device yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Finishes analysis of the deferred functions calls that may be not
/// properly declared for device compilation.
void finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Tells whether given variable is a SYCL explicit SIMD extension's "private
/// global" variable - global variable in the private address space.
bool isSYCLEsimdPrivateGlobal(VarDecl *VDecl) {
return getLangOpts().SYCLIsDevice && getLangOpts().SYCLExplicitSIMD &&
VDecl->hasGlobalStorage() &&
(VDecl->getType().getAddressSpace() == LangAS::opencl_private);
}
};
/// Attaches an attribute of type \p AttrType, which takes a single integral
/// constant argument \p E, to declaration \p D. For IntelFPGAPrivateCopies
/// an implicit default IntelFPGAMemory attribute is added as well.
template <typename AttrType>
void Sema::AddOneConstantValueAttr(Decl *D, const AttributeCommonInfo &CI,
                                   Expr *E) {
  // Temporary attribute instance used only for argument diagnostics.
  AttrType Dummy(Context, CI, E);

  // Unless the argument is value-dependent (i.e. inside a template),
  // validate it now and substitute the converted constant expression.
  if (!E->isValueDependent()) {
    ExprResult Converted;
    if (checkRangedIntegralArgument<AttrType>(E, &Dummy, Converted))
      return;
    E = Converted.get();
  }

  // The FPGA private_copies attribute implies a default memory attribute
  // when none is present yet.
  if (IntelFPGAPrivateCopiesAttr::classof(&Dummy) &&
      !D->hasAttr<IntelFPGAMemoryAttr>())
    D->addAttr(IntelFPGAMemoryAttr::CreateImplicit(
        Context, IntelFPGAMemoryAttr::Default));

  D->addAttr(::new (Context) AttrType(Context, CI, E));
}
/// Attaches an attribute of type \p AttrType, whose single integral constant
/// argument \p E must be a power of two, to declaration \p D. Also ensures
/// an implicit default IntelFPGAMemory attribute is present and, for
/// IntelFPGANumBanks, checks consistency with any IntelFPGABankBits
/// attribute and drops a previously-added implicit NumBanks.
template <typename AttrType>
void Sema::AddOneConstantPowerTwoValueAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E) {
// Temporary attribute used only for argument diagnostics.
AttrType TmpAttr(Context, CI, E);
if (!E->isValueDependent()) {
ExprResult ICE;
if (checkRangedIntegralArgument<AttrType>(E, &TmpAttr, ICE))
return;
Expr::EvalResult Result;
// Safe to evaluate here: checkRangedIntegralArgument succeeded above,
// so E folds to an integral constant.
E->EvaluateAsInt(Result, Context);
llvm::APSInt Value = Result.Val.getInt();
if (!Value.isPowerOf2()) {
Diag(CI.getLoc(), diag::err_attribute_argument_not_power_of_two)
<< &TmpAttr;
return;
}
// numbanks must agree with an existing bank_bits attribute: the number
// of bank bits determines log2 of the number of banks.
if (IntelFPGANumBanksAttr::classof(&TmpAttr)) {
if (auto *BBA = D->getAttr<IntelFPGABankBitsAttr>()) {
unsigned NumBankBits = BBA->args_size();
if (NumBankBits != Value.ceilLogBase2()) {
Diag(TmpAttr.getLocation(), diag::err_bankbits_numbanks_conflicting);
return;
}
}
}
// All checks passed; use the converted constant expression from here on.
E = ICE.get();
}
if (!D->hasAttr<IntelFPGAMemoryAttr>())
D->addAttr(IntelFPGAMemoryAttr::CreateImplicit(
Context, IntelFPGAMemoryAttr::Default));
// We are adding a user NumBanks, drop any implicit default.
if (IntelFPGANumBanksAttr::classof(&TmpAttr)) {
if (auto *NBA = D->getAttr<IntelFPGANumBanksAttr>())
if (NBA->isImplicit())
D->dropAttr<IntelFPGANumBanksAttr>();
}
D->addAttr(::new (Context) AttrType(Context, CI, E));
}
/// Builds a SYCL Intel FPGA loop attribute of type \p FPGALoopAttrT,
/// validating the optional integer constant argument \p E.
/// Returns nullptr (after diagnosing, where applicable) on invalid input.
template <typename FPGALoopAttrT>
FPGALoopAttrT *Sema::BuildSYCLIntelFPGALoopAttr(const AttributeCommonInfo &A,
                                                Expr *E) {
  // Only loop_coalesce may omit its argument; all other kinds require one.
  if (!E && !(A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGALoopCoalesce))
    return nullptr;
  if (E && !E->isInstantiationDependent()) {
    // The argument must be an integer constant expression.
    llvm::APSInt ArgVal(32);
    if (!E->isIntegerConstantExpr(ArgVal, getASTContext())) {
      Diag(E->getExprLoc(), diag::err_attribute_argument_type)
          << A.getAttrName() << AANT_ArgumentIntegerConstant
          << E->getSourceRange();
      return nullptr;
    }
    int Val = ArgVal.getSExtValue();
    if (A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGAII ||
        A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGALoopCoalesce) {
      // ii and loop_coalesce require a strictly positive argument.
      if (Val <= 0) {
        Diag(E->getExprLoc(), diag::err_attribute_requires_positive_integer)
            << A.getAttrName() << /* positive */ 0;
        return nullptr;
      }
    } else if (A.getParsedKind() ==
                   ParsedAttr::AT_SYCLIntelFPGAMaxConcurrency ||
               A.getParsedKind() ==
                   ParsedAttr::AT_SYCLIntelFPGAMaxInterleaving ||
               A.getParsedKind() ==
                   ParsedAttr::AT_SYCLIntelFPGASpeculatedIterations) {
      // These three accept zero, so only negative values are rejected.
      if (Val < 0) {
        Diag(E->getExprLoc(), diag::err_attribute_requires_positive_integer)
            << A.getAttrName() << /* non-negative */ 1;
        return nullptr;
      }
    } else {
      llvm_unreachable("unknown sycl fpga loop attr");
    }
  }
  return new (Context) FPGALoopAttrT(Context, A, E);
}
/// RAII object that enters a new expression evaluation context.
/// The context is pushed in the constructor (when entered) and popped in
/// the destructor, guaranteeing balanced push/pop even on early returns.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Whether a context was actually pushed and must be popped on destruction.
  bool Entered = true;

public:
  /// Pushes \p NewContext unless \p ShouldEnter is false.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  /// Variant that reuses the current lambda context declaration.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  /// Init-list variant: only pushes a context when currently unevaluated.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  /// The token stream of the function body, buffered for later parsing.
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  // Sentinel keys reuse the FunctionDecl sentinels; the location part is a
  // default (invalid) SourceLocation and does not participate.
  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }
  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }
  // Combine the decl's hash with the raw location encoding so identical
  // decls referenced from different locations hash differently.
  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }
  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm
#endif
|
GB_unaryop__ainv_uint64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint64_int16
// op(A') function: GB_tran__ainv_uint64_int16
// C type: uint64_t
// A type: int16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = -aij
// type of the input matrix A entries
#define GB_ATYPE \
    int16_t

// type of the output matrix C entries
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// address the p-th entry of C
#define GB_CX(p) Cx [p]

// unary operator: additive inverse; note the operand has already been
// typecast to uint64_t by GB_CASTING, so negation wraps modulo 2^64
#define GB_OP(z, x) \
    z = -x ;

// casting: typecast from int16_t to uint64_t before applying the operator
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = -((uint64_t) Ax [p]) for all p, in parallel.
// Returns GrB_NO_VALUE when the operator is disabled at compile time
// (GB_DISABLE), so the caller falls back to the generic kernel.
GrB_Info GB_unop__ainv_uint64_int16
(
    uint64_t *restrict Cx,          // output array of size anz
    const int16_t *restrict Ax,     // input array of size anz
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // static schedule: uniform per-entry work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is done by the shared template GB_unaryop_transpose.c,
// specialized via the GB_* macros defined above.  Returns GrB_NO_VALUE when
// the operator is compile-time disabled.
GrB_Info GB_tran__ainv_uint64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // per-thread row counts workspace
    GBI_single_iterator Iter,       // iterator over the vectors of A
    const int64_t *restrict A_slice,// partition of A across threads
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_winograd_dot_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Winograd "dot" stage for pack-4 layout (SSE): multiplies the transformed
// input tiles (bottom_blob_tm) by the transformed kernel (kernel_tm) and
// writes the transformed output tiles (top_blob_tm).
//
// bottom_blob_tm is consumed (released) by this function.  Tiles are first
// permuted into bottom_blob_tm2 in groups of 12/8/4/2/1 so the inner GEMM
// loops read contiguously; then for each output channel and each batch
// index (the 16/36/64 transform coefficients) a 4-lane FMA accumulation is
// performed over inch*4 input scalars.
static void convolution_winograd_dot_pack4_sse(Mat& bottom_blob_tm, int outch, const Mat& kernel_tm, Mat& top_blob_tm, const Option& opt)
{
    // Mat bottom_blob_tm(tiles, 16/36/64, inch, 16u, 4, opt.workspace_allocator);
    const int tiles = bottom_blob_tm.w;
    const int batch = bottom_blob_tm.h;
    const int inch = bottom_blob_tm.c;

    // permute: repack tiles into runs of 12, then 8, 4, 2, 1 remainder tiles.
    // The row count below is the number of such runs for a given `tiles`.
    Mat bottom_blob_tm2;
    if (tiles >= 12)
        bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, batch, 16u, 4, opt.workspace_allocator);
    else if (tiles >= 8)
        bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, batch, 16u, 4, opt.workspace_allocator);
    else if (tiles >= 4)
        bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, batch, 16u, 4, opt.workspace_allocator);
    else if (tiles >= 2)
        bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, batch, 16u, 4, opt.workspace_allocator);
    else // if (tiles >= 1)
        bottom_blob_tm2.create(1 * inch, tiles, batch, 16u, 4, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int r = 0; r < batch; r++)
    {
        Mat tm2 = bottom_blob_tm2.channel(r);

        // tile
        int i = 0;
        for (; i + 11 < tiles; i += 12)
        {
            float* tmpptr = tm2.row(i / 12);

            const float* r0 = bottom_blob_tm;
            r0 += (r * tiles + i) * 4;

            for (int q = 0; q < inch; q++)
            {
                // transpose 4x12: convert 12 pack-4 tile vectors into
                // 4 rows of 12 consecutive scalars for the dot loops
                __m128 _r0 = _mm_load_ps(r0);
                __m128 _r1 = _mm_load_ps(r0 + 4);
                __m128 _r2 = _mm_load_ps(r0 + 4 * 2);
                __m128 _r3 = _mm_load_ps(r0 + 4 * 3);
                __m128 _r4 = _mm_load_ps(r0 + 4 * 4);
                __m128 _r5 = _mm_load_ps(r0 + 4 * 5);
                __m128 _r6 = _mm_load_ps(r0 + 4 * 6);
                __m128 _r7 = _mm_load_ps(r0 + 4 * 7);
                __m128 _r8 = _mm_load_ps(r0 + 4 * 8);
                __m128 _r9 = _mm_load_ps(r0 + 4 * 9);
                __m128 _ra = _mm_load_ps(r0 + 4 * 10);
                __m128 _rb = _mm_load_ps(r0 + 4 * 11);

                _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
                _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
                _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb);

                _mm_store_ps(tmpptr, _r0);
                _mm_store_ps(tmpptr + 4, _r4);
                _mm_store_ps(tmpptr + 4 * 2, _r8);
                _mm_store_ps(tmpptr + 4 * 3, _r1);
                _mm_store_ps(tmpptr + 4 * 4, _r5);
                _mm_store_ps(tmpptr + 4 * 5, _r9);
                _mm_store_ps(tmpptr + 4 * 6, _r2);
                _mm_store_ps(tmpptr + 4 * 7, _r6);
                _mm_store_ps(tmpptr + 4 * 8, _ra);
                _mm_store_ps(tmpptr + 4 * 9, _r3);
                _mm_store_ps(tmpptr + 4 * 10, _r7);
                _mm_store_ps(tmpptr + 4 * 11, _rb);

                // advance to the same tile group in the next input channel
                r0 += bottom_blob_tm.cstep * 4;
                tmpptr += 48;
            }
        }
        for (; i + 7 < tiles; i += 8)
        {
            float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);

            const float* r0 = bottom_blob_tm;
            r0 += (r * tiles + i) * 4;

            for (int q = 0; q < inch; q++)
            {
                // transpose 4x8
                __m128 _r0 = _mm_load_ps(r0);
                __m128 _r1 = _mm_load_ps(r0 + 4);
                __m128 _r2 = _mm_load_ps(r0 + 4 * 2);
                __m128 _r3 = _mm_load_ps(r0 + 4 * 3);
                __m128 _r4 = _mm_load_ps(r0 + 4 * 4);
                __m128 _r5 = _mm_load_ps(r0 + 4 * 5);
                __m128 _r6 = _mm_load_ps(r0 + 4 * 6);
                __m128 _r7 = _mm_load_ps(r0 + 4 * 7);

                _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
                _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);

                _mm_store_ps(tmpptr, _r0);
                _mm_store_ps(tmpptr + 4, _r4);
                _mm_store_ps(tmpptr + 4 * 2, _r1);
                _mm_store_ps(tmpptr + 4 * 3, _r5);
                _mm_store_ps(tmpptr + 4 * 4, _r2);
                _mm_store_ps(tmpptr + 4 * 5, _r6);
                _mm_store_ps(tmpptr + 4 * 6, _r3);
                _mm_store_ps(tmpptr + 4 * 7, _r7);

                r0 += bottom_blob_tm.cstep * 4;
                tmpptr += 32;
            }
        }
        for (; i + 3 < tiles; i += 4)
        {
            float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);

            const float* r0 = bottom_blob_tm;
            r0 += (r * tiles + i) * 4;

            for (int q = 0; q < inch; q++)
            {
                // transpose 4x4
                __m128 _r0 = _mm_load_ps(r0);
                __m128 _r1 = _mm_load_ps(r0 + 4);
                __m128 _r2 = _mm_load_ps(r0 + 4 * 2);
                __m128 _r3 = _mm_load_ps(r0 + 4 * 3);

                _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);

                _mm_store_ps(tmpptr, _r0);
                _mm_store_ps(tmpptr + 4, _r1);
                _mm_store_ps(tmpptr + 4 * 2, _r2);
                _mm_store_ps(tmpptr + 4 * 3, _r3);

                r0 += bottom_blob_tm.cstep * 4;
                tmpptr += 16;
            }
        }
        for (; i + 1 < tiles; i += 2)
        {
            float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);

            const float* r0 = bottom_blob_tm;
            r0 += (r * tiles + i) * 4;

            for (int q = 0; q < inch; q++)
            {
                // transpose 4x2 via unpack of the two pack-4 vectors
                __m128 _r0 = _mm_load_ps(r0);
                __m128 _r1 = _mm_load_ps(r0 + 4);

                __m128 _r01_0 = _mm_unpacklo_ps(_r0, _r1);
                __m128 _r01_1 = _mm_unpackhi_ps(_r0, _r1);

                _mm_store_ps(tmpptr, _r01_0);
                _mm_store_ps(tmpptr + 4, _r01_1);

                r0 += bottom_blob_tm.cstep * 4;
                tmpptr += 8;
            }
        }
        for (; i < tiles; i++)
        {
            float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);

            const float* r0 = bottom_blob_tm;
            r0 += (r * tiles + i) * 4;

            for (int q = 0; q < inch; q++)
            {
                // single tile: copy the pack-4 vector through unchanged
                __m128 _val = _mm_load_ps(r0);
                _mm_store_ps(tmpptr, _val);

                r0 += bottom_blob_tm.cstep * 4;
                tmpptr += 4;
            }
        }
    }

    // the original transformed input is no longer needed
    bottom_blob_tm = Mat();
    // permute end

    top_blob_tm.create(tiles, batch, outch, 16u, 4, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* output0_tm = top_blob_tm.channel(p);

        const Mat kernel0_tm = kernel_tm.channel(p);

        for (int r = 0; r < batch; r++)
        {
            const Mat bb2 = bottom_blob_tm2.channel(r);

            int i = 0;
            for (; i + 11 < tiles; i += 12)
            {
                const float* r0 = bb2.row(i / 12);
                const float* k0 = kernel0_tm.row(r);

                int nn = inch * 4; // inch always > 0

                // 12 independent accumulators, one per tile in the group
                __m128 _sum0 = _mm_setzero_ps();
                __m128 _sum1 = _mm_setzero_ps();
                __m128 _sum2 = _mm_setzero_ps();
                __m128 _sum3 = _mm_setzero_ps();
                __m128 _sum4 = _mm_setzero_ps();
                __m128 _sum5 = _mm_setzero_ps();
                __m128 _sum6 = _mm_setzero_ps();
                __m128 _sum7 = _mm_setzero_ps();
                __m128 _sum8 = _mm_setzero_ps();
                __m128 _sum9 = _mm_setzero_ps();
                __m128 _suma = _mm_setzero_ps();
                __m128 _sumb = _mm_setzero_ps();

                for (int j = 0; j < nn; j++)
                {
                    // one pack-4 weight vector times 12 broadcast scalars
                    __m128 _w0 = _mm_load_ps(k0);

                    __m128 _val0 = _mm_load1_ps(r0);
                    __m128 _val1 = _mm_load1_ps(r0 + 1);
                    __m128 _val2 = _mm_load1_ps(r0 + 2);
                    __m128 _val3 = _mm_load1_ps(r0 + 3);
                    __m128 _val4 = _mm_load1_ps(r0 + 4);
                    __m128 _val5 = _mm_load1_ps(r0 + 5);
                    __m128 _val6 = _mm_load1_ps(r0 + 6);
                    __m128 _val7 = _mm_load1_ps(r0 + 7);
                    __m128 _val8 = _mm_load1_ps(r0 + 8);
                    __m128 _val9 = _mm_load1_ps(r0 + 9);
                    __m128 _vala = _mm_load1_ps(r0 + 10);
                    __m128 _valb = _mm_load1_ps(r0 + 11);

                    _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
                    _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
                    _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
                    _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
                    _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4);
                    _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5);
                    _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6);
                    _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7);
                    _sum8 = _mm_comp_fmadd_ps(_val8, _w0, _sum8);
                    _sum9 = _mm_comp_fmadd_ps(_val9, _w0, _sum9);
                    _suma = _mm_comp_fmadd_ps(_vala, _w0, _suma);
                    _sumb = _mm_comp_fmadd_ps(_valb, _w0, _sumb);

                    r0 += 12;
                    k0 += 4;
                }

                _mm_store_ps(output0_tm, _sum0);
                _mm_store_ps(output0_tm + 4, _sum1);
                _mm_store_ps(output0_tm + 4 * 2, _sum2);
                _mm_store_ps(output0_tm + 4 * 3, _sum3);
                _mm_store_ps(output0_tm + 4 * 4, _sum4);
                _mm_store_ps(output0_tm + 4 * 5, _sum5);
                _mm_store_ps(output0_tm + 4 * 6, _sum6);
                _mm_store_ps(output0_tm + 4 * 7, _sum7);
                _mm_store_ps(output0_tm + 4 * 8, _sum8);
                _mm_store_ps(output0_tm + 4 * 9, _sum9);
                _mm_store_ps(output0_tm + 4 * 10, _suma);
                _mm_store_ps(output0_tm + 4 * 11, _sumb);

                output0_tm += 4 * 12;
            }
            for (; i + 7 < tiles; i += 8)
            {
                const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
                const float* k0 = kernel0_tm.row(r);

                int nn = inch * 4; // inch always > 0

                __m128 _sum0 = _mm_setzero_ps();
                __m128 _sum1 = _mm_setzero_ps();
                __m128 _sum2 = _mm_setzero_ps();
                __m128 _sum3 = _mm_setzero_ps();
                __m128 _sum4 = _mm_setzero_ps();
                __m128 _sum5 = _mm_setzero_ps();
                __m128 _sum6 = _mm_setzero_ps();
                __m128 _sum7 = _mm_setzero_ps();

                for (int j = 0; j < nn; j++)
                {
                    __m128 _w0 = _mm_load_ps(k0);

                    __m128 _val0 = _mm_load1_ps(r0);
                    __m128 _val1 = _mm_load1_ps(r0 + 1);
                    __m128 _val2 = _mm_load1_ps(r0 + 2);
                    __m128 _val3 = _mm_load1_ps(r0 + 3);
                    __m128 _val4 = _mm_load1_ps(r0 + 4);
                    __m128 _val5 = _mm_load1_ps(r0 + 5);
                    __m128 _val6 = _mm_load1_ps(r0 + 6);
                    __m128 _val7 = _mm_load1_ps(r0 + 7);

                    _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
                    _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
                    _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
                    _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
                    _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4);
                    _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5);
                    _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6);
                    _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7);

                    r0 += 8;
                    k0 += 4;
                }

                _mm_store_ps(output0_tm, _sum0);
                _mm_store_ps(output0_tm + 4, _sum1);
                _mm_store_ps(output0_tm + 4 * 2, _sum2);
                _mm_store_ps(output0_tm + 4 * 3, _sum3);
                _mm_store_ps(output0_tm + 4 * 4, _sum4);
                _mm_store_ps(output0_tm + 4 * 5, _sum5);
                _mm_store_ps(output0_tm + 4 * 6, _sum6);
                _mm_store_ps(output0_tm + 4 * 7, _sum7);

                output0_tm += 4 * 8;
            }
            for (; i + 3 < tiles; i += 4)
            {
                const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
                const float* k0 = kernel0_tm.row(r);

                int nn = inch * 4; // inch always > 0

                __m128 _sum0 = _mm_setzero_ps();
                __m128 _sum1 = _mm_setzero_ps();
                __m128 _sum2 = _mm_setzero_ps();
                __m128 _sum3 = _mm_setzero_ps();

                for (int j = 0; j < nn; j++)
                {
                    __m128 _w0 = _mm_load_ps(k0);

                    __m128 _val0 = _mm_load1_ps(r0);
                    __m128 _val1 = _mm_load1_ps(r0 + 1);
                    __m128 _val2 = _mm_load1_ps(r0 + 2);
                    __m128 _val3 = _mm_load1_ps(r0 + 3);

                    _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
                    _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
                    _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
                    _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);

                    r0 += 4;
                    k0 += 4;
                }

                _mm_store_ps(output0_tm, _sum0);
                _mm_store_ps(output0_tm + 4, _sum1);
                _mm_store_ps(output0_tm + 4 * 2, _sum2);
                _mm_store_ps(output0_tm + 4 * 3, _sum3);

                output0_tm += 4 * 4;
            }
            for (; i + 1 < tiles; i += 2)
            {
                const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
                const float* k0 = kernel0_tm.row(r);

                int nn = inch * 4; // inch always > 0

                __m128 _sum0 = _mm_setzero_ps();
                __m128 _sum1 = _mm_setzero_ps();

                for (int j = 0; j < nn; j++)
                {
                    __m128 _w0 = _mm_load_ps(k0);

                    __m128 _val0 = _mm_load1_ps(r0);
                    __m128 _val1 = _mm_load1_ps(r0 + 1);

                    _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
                    _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);

                    r0 += 2;
                    k0 += 4;
                }

                _mm_store_ps(output0_tm, _sum0);
                _mm_store_ps(output0_tm + 4, _sum1);

                output0_tm += 4 * 2;
            }
            for (; i < tiles; i++)
            {
                const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
                const float* k0 = kernel0_tm.row(r);

                int nn = inch * 4; // inch always > 0

                __m128 _sum = _mm_setzero_ps();

                for (int j = 0; j < nn; j++)
                {
                    __m128 _w0 = _mm_load_ps(k0);
                    __m128 _val0 = _mm_load1_ps(r0);
                    _sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);

                    r0 += 1;
                    k0 += 4;
                }

                _mm_store_ps(output0_tm, _sum);

                output0_tm += 4;
            }
        }
    }
}
|
mpi-omp-mat-infnorm-blkstp.c | #ifdef _CIVL
#include <civlc.cvh>
#endif
/*************************************************************************
C-DAC Tech Workshop : HeGaPa-2012
July 16-20,2012
Example 3 : Mpi-Omp_MatInf_blkstp.c
Objective : Write parallel program using MPI and OPENMP to compute norm
of a square matrix.
Input : Read file (infndata.inp) for Matrix
Output : Process with rank 0 prints the value of Infinity Norm
Created : MAY-2012
************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#include <omp.h>
#ifdef _CIVL
$input int NUM_ROWS_BOUND = 4;
$input int NUM_COLS_BOUND = 4;
#endif
/* Main Program */
/*
 * Computes the infinity norm (maximum absolute row sum) of a matrix read
 * from ./data/infndata.inp.  Rows are block-striped across MPI processes
 * with MPI_Scatter; each process computes its local maximum row sum with an
 * OpenMP parallel loop, and the global maximum is combined on rank 0 with
 * MPI_Reduce.  Rank 0 also recomputes the norm serially as a cross-check
 * and prints both values.
 */
int main(int argc, char **argv)
{
    int Numprocs, MyRank;
    int NoofCols, NoofRows, ScatterSize;
    int index, irow, icol;
    int Root = 0;
    /* Initialize pointers so non-root ranks pass a defined (NULL) send
       buffer to MPI_Scatter, where it is ignored. */
    float **InputMatrix = NULL;
    float *Buffer = NULL, *MyBuffer = NULL;
    float max = 0, sum = 0, Inf_norm = 0;
    FILE *fp;
    int MatrixFileStatus = 1;

    /* ........MPI Initialisation ....... */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
    MPI_Comm_size(MPI_COMM_WORLD, &Numprocs);

    if (MyRank == 0) {
        /* .......Read The Matrix Input File ...... */
        if ((fp = fopen("./data/infndata.inp", "r")) == NULL) {
            MatrixFileStatus = 0;
        }
        if (MatrixFileStatus != 0) {
            fscanf(fp, "%d %d\n", &NoofRows, &NoofCols);
#ifdef _CIVL
            $assume(NoofRows <= NUM_ROWS_BOUND);
            $assume(NoofRows <= NoofCols);
#endif
            /* .......Allocate Memory For Matrix ..... */
            InputMatrix = (float **) malloc(NoofRows * sizeof(float *));
            for (irow = 0; irow < NoofRows; irow++)
                InputMatrix[irow] = (float *) malloc(NoofCols * sizeof(float));
            /* .......Read Data For Matrix ..... */
            for (irow = 0; irow < NoofRows; irow++) {
                for (icol = 0; icol < NoofCols; icol++)
                    fscanf(fp, "%f", &InputMatrix[irow][icol]);
            }
            fclose(fp);
            /* .......Convert 2-D InputMatrix Into 1-D Array ..... */
            Buffer = (float *) malloc(NoofRows * NoofCols * sizeof(float));
            index = 0;
            for (irow = 0; irow < NoofRows; irow++) {
                for (icol = 0; icol < NoofCols; icol++) {
                    Buffer[index] = InputMatrix[irow][icol];
                    index++;
                }
            }
        }
    }/* MyRank == 0 */

    MPI_Barrier(MPI_COMM_WORLD);
    /* All ranks must agree on whether the input file was readable. */
    MPI_Bcast(&MatrixFileStatus, 1, MPI_INT, Root, MPI_COMM_WORLD);
    if (MatrixFileStatus == 0) {
        if (MyRank == Root)
            printf("Can't Open Matrix Input File");
        MPI_Finalize();
        exit(-1);
    }
    MPI_Bcast(&NoofRows, 1, MPI_INT, Root, MPI_COMM_WORLD);
#ifdef _CIVL
    $assume(NoofRows >= Numprocs);
#endif
    if (NoofRows < Numprocs) {
        MPI_Finalize();
        if (MyRank == 0)
            printf("Noof Rows Should Be More Than No of Processors ... \n");
        exit(0);
    }
#ifdef _CIVL
    $assume(NoofRows % Numprocs == 0);
#endif
    if (NoofRows % Numprocs != 0) {
        MPI_Finalize();
        if (MyRank == 0) {
            printf("Matrix Cannot Be Striped Evenly ..... \n");
        }
        exit(0);
    }
    MPI_Bcast(&NoofCols, 1, MPI_INT, Root, MPI_COMM_WORLD);

    /* Each process receives ScatterSize contiguous rows of the matrix. */
    ScatterSize = NoofRows / Numprocs;
    MyBuffer = (float *) malloc(ScatterSize * NoofCols * sizeof(float));
    MPI_Scatter(Buffer, ScatterSize * NoofCols, MPI_FLOAT,
                MyBuffer, ScatterSize * NoofCols, MPI_FLOAT,
                0, MPI_COMM_WORLD);

    /* OpenMP Parallel For Directive: each thread accumulates the absolute
       sum of whole rows; `max` is updated inside a critical section. */
    max = 0;
    omp_set_num_threads(4);
#pragma omp parallel for private(sum,index,icol) shared(max)
    for (irow = 0; irow < ScatterSize; irow++) {
        printf("The Threadid Is %d With each Processor's Rank %d\n",omp_get_thread_num(), MyRank);
        sum = 0;
        index = irow * NoofCols;
        for (icol = 0; icol < NoofCols; icol++) {
            sum += (MyBuffer[index] >= 0) ? (MyBuffer[index]) : (0 - MyBuffer[index]);
            index++;
        }
        /* OpenMP Critical Section */
#pragma omp critical
        if (sum > max)
            max = sum;
    }

    MPI_Barrier(MPI_COMM_WORLD);
    /* BUG FIX: `max` and `Inf_norm` are float, so the reduction must use
       MPI_FLOAT.  The original MPI_DOUBLE read/wrote 8 bytes from 4-byte
       objects (undefined behavior) and produced a garbage norm. */
    MPI_Reduce(&max, &Inf_norm, 1, MPI_FLOAT, MPI_MAX, 0, MPI_COMM_WORLD);

    if (MyRank == 0) {
        max = 0;
        /* Serial Check */
        for (irow = 0; irow < NoofRows; irow++) {
            sum = 0;
            index = irow * NoofCols;
            for (icol = 0; icol < NoofCols; icol++) {
                sum += (Buffer[index] >= 0) ? (Buffer[index]) : (0 - Buffer[index]);
                index++;
            }
            max = max < sum ? sum : max;
        }
        printf("\nThe Infinity Norm Is(Parallel Code) : %f\n", Inf_norm);
        printf("\nThe Infinity Norm Is(Serial Code) : %f\n\n", max);
        /* Freeing Allocated Memory: free each row before the row-pointer
           array (the original leaked the individual rows). */
        for (irow = 0; irow < NoofRows; irow++)
            free(InputMatrix[irow]);
        free(InputMatrix);
        free(Buffer);
    }
    /* MPI-Termination */
    free(MyBuffer);
    MPI_Finalize();
    return 0;
}
|
GB_binop__bclr_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__bclr_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__bclr_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_uint8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_uint8)
// C=scalar+B GB (_bind1st__bclr_uint8)
// C=scalar+B' GB (_bind1st_tran__bclr_uint8)
// C=A+scalar GB (_bind2nd__bclr_uint8)
// C=A'+scalar GB (_bind2nd_tran__bclr_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = GB_BITCLR (aij, bij, uint8_t, 8)
// type of matrix A entries
#define GB_ATYPE \
    uint8_t

// type of matrix B entries
#define GB_BTYPE \
    uint8_t

// type of matrix C entries
#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// address the p-th entry of C
#define GB_CX(p) Cx [p]

// binary operator: bit-clear, z = GB_BITCLR (x, y, uint8_t, 8)
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITCLR (x, y, uint8_t, 8) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_UINT8 || GxB_NO_BCLR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// C = A+B with the bclr_uint8 operator; C, A, and B are all dense, no mask,
// no accumulator.  The work is done by the shared dense ewise3 template.
void GB (_Cdense_ewise3_noaccum__bclr_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads          // # of OpenMP threads to use
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this operator is compile-time disabled, so the
// caller falls back to the generic kernel.
GrB_Info GB (_Cdense_accumB__bclr_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bclr_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the inner block already returned (kept as generated)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Set union of A and B with the bclr_uint8 operator, optionally masked.
// When is_eWiseUnion is true, alpha/beta scalars substitute for entries
// present in only one of A or B.  Work is done by GB_add_template.c.
GrB_Info GB (_AaddB__bclr_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces freed by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // the scalars are only used (and only valid) for eWiseUnion
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// Set intersection of A and B with the bclr_uint8 operator; work is done
// by the shared template GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__bclr_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// flipxy selects fmult(y,x) instead of fmult(x,y); GB_BINOP_FLIP is set for
// this operator (bit-clear is not commutative), so both orders are
// instantiated from GB_emult_02_template.c.
GrB_Info GB (_AemultB_02__bclr_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Factory-generated kernel for the bclr/uint8 binary operator: C<M> = A.*B
// where M is sparse/hyper and both A and B are bitmap/full.  The mask M drives
// the iteration (M_ek_slicing / M_ntasks / M_nthreads); the work itself is in
// the included template.
GrB_Info GB (_AemultB_04__bclr_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Factory-generated kernel for the bclr/uint8 binary operator:
// C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is held in bitmap form.
// All looping/masking logic is in the included bitmap template.
GrB_Info GB (_AemultB_bitmap__bclr_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the bclr operator with the scalar bound as the first argument:
// Cx [p] = bitclr (x, Bx [p]) for every one of the bnz entries of B.
// Bb is B's bitmap (may be NULL for full matrices, per the GBB macro);
// entries absent from the bitmap are left untouched in Cx.
GrB_Info GB (_bind1st__bclr_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cz = (uint8_t *) Cx_output ;
uint8_t *Bz = (uint8_t *) Bx_input ;
uint8_t xscalar = (*((uint8_t *) x_input)) ;
int64_t k ;
// one static chunk of the entry range per thread
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
if (GBB (Bb, k))
{
uint8_t bk = GBX (Bz, k, false) ;
Cz [k] = GB_BITCLR (xscalar, bk, uint8_t, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the bclr operator with the scalar bound as the second argument:
// Cx [p] = bitclr (Ax [p], y) for every one of the anz entries of A.
// Ab is A's bitmap (may be NULL for full matrices, per the GBB macro);
// entries absent from the bitmap are left untouched in Cx.
GrB_Info GB (_bind2nd__bclr_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cz = (uint8_t *) Cx_output ;
uint8_t *Az = (uint8_t *) Ax_input ;
uint8_t yscalar = (*((uint8_t *) y_input)) ;
int64_t k ;
// one static chunk of the entry range per thread
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (GBB (Ab, k))
{
uint8_t ak = GBX (Az, k, false) ;
Cz [k] = GB_BITCLR (ak, yscalar, uint8_t, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it computes one output entry
// cij = bitclr (x, aij) with x bound as the first argument.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (x, aij, uint8_t, 8) ; \
}
// C = op (x, A'): transpose A and apply the bclr operator with scalar x
// bound first.  Workspaces/A_slice carry the transpose bucketing computed
// by the caller.
GrB_Info GB (_bind1st_tran__bclr_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent generated code (generator artifact:
// it redefines the same value here)
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it computes one output entry
// cij = bitclr (aij, y) with y bound as the second argument.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (aij, y, uint8_t, 8) ; \
}
// C = op (A', y): transpose A and apply the bclr operator with scalar y
// bound second.  Workspaces/A_slice carry the transpose bucketing computed
// by the caller.
GrB_Info GB (_bind2nd_tran__bclr_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_7x7_pack1to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv7x7s2_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
const float* r5 = img0.row(5);
const float* r6 = img0.row(6);
const float* kptr = (const float*)kernel.channel(p).row(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
#if __aarch64__
for (; j + 7 < outw; j += 8)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r0
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[2] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[2] \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4s, v5.4s}, [%1] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[3] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[0] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[0] \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[1] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"fmla v20.4s, v28.4s, v3.s[0] \n"
"fmla v21.4s, v28.4s, v3.s[2] \n"
"fmla v22.4s, v28.4s, v4.s[0] \n"
"fmla v23.4s, v28.4s, v4.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v20.4s, v29.4s, v3.s[1] \n"
"fmla v21.4s, v29.4s, v3.s[3] \n"
"fmla v22.4s, v29.4s, v4.s[1] \n"
"fmla v23.4s, v29.4s, v4.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%2], #64 \n" // r1
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"fmla v20.4s, v30.4s, v3.s[2] \n"
"fmla v21.4s, v30.4s, v4.s[0] \n"
"fmla v22.4s, v30.4s, v4.s[2] \n"
"fmla v23.4s, v30.4s, v5.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v10.4s, v11.4s}, [%2] \n"
"fmla v16.4s, v24.4s, v6.s[0] \n"
"fmla v17.4s, v24.4s, v6.s[2] \n"
"fmla v18.4s, v24.4s, v7.s[0] \n"
"fmla v19.4s, v24.4s, v7.s[2] \n"
"fmla v20.4s, v24.4s, v8.s[0] \n"
"fmla v21.4s, v24.4s, v8.s[2] \n"
"fmla v22.4s, v24.4s, v9.s[0] \n"
"fmla v23.4s, v24.4s, v9.s[2] \n"
"fmla v16.4s, v25.4s, v6.s[1] \n"
"fmla v17.4s, v25.4s, v6.s[3] \n"
"fmla v18.4s, v25.4s, v7.s[1] \n"
"fmla v19.4s, v25.4s, v7.s[3] \n"
"fmla v20.4s, v25.4s, v8.s[1] \n"
"fmla v21.4s, v25.4s, v8.s[3] \n"
"fmla v22.4s, v25.4s, v9.s[1] \n"
"fmla v23.4s, v25.4s, v9.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v6.s[2] \n"
"fmla v17.4s, v26.4s, v7.s[0] \n"
"fmla v18.4s, v26.4s, v7.s[2] \n"
"fmla v19.4s, v26.4s, v8.s[0] \n"
"fmla v20.4s, v26.4s, v8.s[2] \n"
"fmla v21.4s, v26.4s, v9.s[0] \n"
"fmla v22.4s, v26.4s, v9.s[2] \n"
"fmla v23.4s, v26.4s, v10.s[0] \n"
"fmla v16.4s, v27.4s, v6.s[3] \n"
"fmla v17.4s, v27.4s, v7.s[1] \n"
"fmla v18.4s, v27.4s, v7.s[3] \n"
"fmla v19.4s, v27.4s, v8.s[1] \n"
"fmla v20.4s, v27.4s, v8.s[3] \n"
"fmla v21.4s, v27.4s, v9.s[1] \n"
"fmla v22.4s, v27.4s, v9.s[3] \n"
"fmla v23.4s, v27.4s, v10.s[1] \n"
"fmla v16.4s, v28.4s, v7.s[0] \n"
"fmla v17.4s, v28.4s, v7.s[2] \n"
"fmla v18.4s, v28.4s, v8.s[0] \n"
"fmla v19.4s, v28.4s, v8.s[2] \n"
"fmla v20.4s, v28.4s, v9.s[0] \n"
"fmla v21.4s, v28.4s, v9.s[2] \n"
"fmla v22.4s, v28.4s, v10.s[0] \n"
"fmla v23.4s, v28.4s, v10.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v7.s[1] \n"
"fmla v17.4s, v29.4s, v7.s[3] \n"
"fmla v18.4s, v29.4s, v8.s[1] \n"
"fmla v19.4s, v29.4s, v8.s[3] \n"
"fmla v20.4s, v29.4s, v9.s[1] \n"
"fmla v21.4s, v29.4s, v9.s[3] \n"
"fmla v22.4s, v29.4s, v10.s[1] \n"
"fmla v23.4s, v29.4s, v10.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r2
"fmla v16.4s, v30.4s, v7.s[2] \n"
"fmla v17.4s, v30.4s, v8.s[0] \n"
"fmla v18.4s, v30.4s, v8.s[2] \n"
"fmla v19.4s, v30.4s, v9.s[0] \n"
"fmla v20.4s, v30.4s, v9.s[2] \n"
"fmla v21.4s, v30.4s, v10.s[0] \n"
"fmla v22.4s, v30.4s, v10.s[2] \n"
"fmla v23.4s, v30.4s, v11.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4s, v5.4s}, [%3] \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[2] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[3] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[0] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[0] \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[1] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"fmla v20.4s, v28.4s, v3.s[0] \n"
"fmla v21.4s, v28.4s, v3.s[2] \n"
"fmla v22.4s, v28.4s, v4.s[0] \n"
"fmla v23.4s, v28.4s, v4.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v20.4s, v29.4s, v3.s[1] \n"
"fmla v21.4s, v29.4s, v3.s[3] \n"
"fmla v22.4s, v29.4s, v4.s[1] \n"
"fmla v23.4s, v29.4s, v4.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%4], #64 \n" // r3
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"fmla v20.4s, v30.4s, v3.s[2] \n"
"fmla v21.4s, v30.4s, v4.s[0] \n"
"fmla v22.4s, v30.4s, v4.s[2] \n"
"fmla v23.4s, v30.4s, v5.s[0] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v10.4s, v11.4s}, [%4] \n"
"fmla v16.4s, v24.4s, v6.s[0] \n"
"fmla v17.4s, v24.4s, v6.s[2] \n"
"fmla v18.4s, v24.4s, v7.s[0] \n"
"fmla v19.4s, v24.4s, v7.s[2] \n"
"fmla v20.4s, v24.4s, v8.s[0] \n"
"fmla v21.4s, v24.4s, v8.s[2] \n"
"fmla v22.4s, v24.4s, v9.s[0] \n"
"fmla v23.4s, v24.4s, v9.s[2] \n"
"fmla v16.4s, v25.4s, v6.s[1] \n"
"fmla v17.4s, v25.4s, v6.s[3] \n"
"fmla v18.4s, v25.4s, v7.s[1] \n"
"fmla v19.4s, v25.4s, v7.s[3] \n"
"fmla v20.4s, v25.4s, v8.s[1] \n"
"fmla v21.4s, v25.4s, v8.s[3] \n"
"fmla v22.4s, v25.4s, v9.s[1] \n"
"fmla v23.4s, v25.4s, v9.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v6.s[2] \n"
"fmla v17.4s, v26.4s, v7.s[0] \n"
"fmla v18.4s, v26.4s, v7.s[2] \n"
"fmla v19.4s, v26.4s, v8.s[0] \n"
"fmla v20.4s, v26.4s, v8.s[2] \n"
"fmla v21.4s, v26.4s, v9.s[0] \n"
"fmla v22.4s, v26.4s, v9.s[2] \n"
"fmla v23.4s, v26.4s, v10.s[0] \n"
"fmla v16.4s, v27.4s, v6.s[3] \n"
"fmla v17.4s, v27.4s, v7.s[1] \n"
"fmla v18.4s, v27.4s, v7.s[3] \n"
"fmla v19.4s, v27.4s, v8.s[1] \n"
"fmla v20.4s, v27.4s, v8.s[3] \n"
"fmla v21.4s, v27.4s, v9.s[1] \n"
"fmla v22.4s, v27.4s, v9.s[3] \n"
"fmla v23.4s, v27.4s, v10.s[1] \n"
"fmla v16.4s, v28.4s, v7.s[0] \n"
"fmla v17.4s, v28.4s, v7.s[2] \n"
"fmla v18.4s, v28.4s, v8.s[0] \n"
"fmla v19.4s, v28.4s, v8.s[2] \n"
"fmla v20.4s, v28.4s, v9.s[0] \n"
"fmla v21.4s, v28.4s, v9.s[2] \n"
"fmla v22.4s, v28.4s, v10.s[0] \n"
"fmla v23.4s, v28.4s, v10.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v7.s[1] \n"
"fmla v17.4s, v29.4s, v7.s[3] \n"
"fmla v18.4s, v29.4s, v8.s[1] \n"
"fmla v19.4s, v29.4s, v8.s[3] \n"
"fmla v20.4s, v29.4s, v9.s[1] \n"
"fmla v21.4s, v29.4s, v9.s[3] \n"
"fmla v22.4s, v29.4s, v10.s[1] \n"
"fmla v23.4s, v29.4s, v10.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" // r4
"fmla v16.4s, v30.4s, v7.s[2] \n"
"fmla v17.4s, v30.4s, v8.s[0] \n"
"fmla v18.4s, v30.4s, v8.s[2] \n"
"fmla v19.4s, v30.4s, v9.s[0] \n"
"fmla v20.4s, v30.4s, v9.s[2] \n"
"fmla v21.4s, v30.4s, v10.s[0] \n"
"fmla v22.4s, v30.4s, v10.s[2] \n"
"fmla v23.4s, v30.4s, v11.s[0] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v4.4s, v5.4s}, [%5] \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[2] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[3] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[0] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[0] \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[1] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"fmla v20.4s, v28.4s, v3.s[0] \n"
"fmla v21.4s, v28.4s, v3.s[2] \n"
"fmla v22.4s, v28.4s, v4.s[0] \n"
"fmla v23.4s, v28.4s, v4.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v20.4s, v29.4s, v3.s[1] \n"
"fmla v21.4s, v29.4s, v3.s[3] \n"
"fmla v22.4s, v29.4s, v4.s[1] \n"
"fmla v23.4s, v29.4s, v4.s[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%6], #64 \n" // r5
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"fmla v20.4s, v30.4s, v3.s[2] \n"
"fmla v21.4s, v30.4s, v4.s[0] \n"
"fmla v22.4s, v30.4s, v4.s[2] \n"
"fmla v23.4s, v30.4s, v5.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v10.4s, v11.4s}, [%6] \n"
"fmla v16.4s, v24.4s, v6.s[0] \n"
"fmla v17.4s, v24.4s, v6.s[2] \n"
"fmla v18.4s, v24.4s, v7.s[0] \n"
"fmla v19.4s, v24.4s, v7.s[2] \n"
"fmla v20.4s, v24.4s, v8.s[0] \n"
"fmla v21.4s, v24.4s, v8.s[2] \n"
"fmla v22.4s, v24.4s, v9.s[0] \n"
"fmla v23.4s, v24.4s, v9.s[2] \n"
"fmla v16.4s, v25.4s, v6.s[1] \n"
"fmla v17.4s, v25.4s, v6.s[3] \n"
"fmla v18.4s, v25.4s, v7.s[1] \n"
"fmla v19.4s, v25.4s, v7.s[3] \n"
"fmla v20.4s, v25.4s, v8.s[1] \n"
"fmla v21.4s, v25.4s, v8.s[3] \n"
"fmla v22.4s, v25.4s, v9.s[1] \n"
"fmla v23.4s, v25.4s, v9.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v6.s[2] \n"
"fmla v17.4s, v26.4s, v7.s[0] \n"
"fmla v18.4s, v26.4s, v7.s[2] \n"
"fmla v19.4s, v26.4s, v8.s[0] \n"
"fmla v20.4s, v26.4s, v8.s[2] \n"
"fmla v21.4s, v26.4s, v9.s[0] \n"
"fmla v22.4s, v26.4s, v9.s[2] \n"
"fmla v23.4s, v26.4s, v10.s[0] \n"
"fmla v16.4s, v27.4s, v6.s[3] \n"
"fmla v17.4s, v27.4s, v7.s[1] \n"
"fmla v18.4s, v27.4s, v7.s[3] \n"
"fmla v19.4s, v27.4s, v8.s[1] \n"
"fmla v20.4s, v27.4s, v8.s[3] \n"
"fmla v21.4s, v27.4s, v9.s[1] \n"
"fmla v22.4s, v27.4s, v9.s[3] \n"
"fmla v23.4s, v27.4s, v10.s[1] \n"
"fmla v16.4s, v28.4s, v7.s[0] \n"
"fmla v17.4s, v28.4s, v7.s[2] \n"
"fmla v18.4s, v28.4s, v8.s[0] \n"
"fmla v19.4s, v28.4s, v8.s[2] \n"
"fmla v20.4s, v28.4s, v9.s[0] \n"
"fmla v21.4s, v28.4s, v9.s[2] \n"
"fmla v22.4s, v28.4s, v10.s[0] \n"
"fmla v23.4s, v28.4s, v10.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v7.s[1] \n"
"fmla v17.4s, v29.4s, v7.s[3] \n"
"fmla v18.4s, v29.4s, v8.s[1] \n"
"fmla v19.4s, v29.4s, v8.s[3] \n"
"fmla v20.4s, v29.4s, v9.s[1] \n"
"fmla v21.4s, v29.4s, v9.s[3] \n"
"fmla v22.4s, v29.4s, v10.s[1] \n"
"fmla v23.4s, v29.4s, v10.s[3] \n"
"prfm pldl1keep, [%7, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%7], #64 \n" // r6
"fmla v16.4s, v30.4s, v7.s[2] \n"
"fmla v17.4s, v30.4s, v8.s[0] \n"
"fmla v18.4s, v30.4s, v8.s[2] \n"
"fmla v19.4s, v30.4s, v9.s[0] \n"
"fmla v20.4s, v30.4s, v9.s[2] \n"
"fmla v21.4s, v30.4s, v10.s[0] \n"
"fmla v22.4s, v30.4s, v10.s[2] \n"
"fmla v23.4s, v30.4s, v11.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v4.4s, v5.4s}, [%7] \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[2] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[3] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[0] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[0] \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[1] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"fmla v20.4s, v28.4s, v3.s[0] \n"
"fmla v21.4s, v28.4s, v3.s[2] \n"
"fmla v22.4s, v28.4s, v4.s[0] \n"
"fmla v23.4s, v28.4s, v4.s[2] \n"
"sub %0, %0, #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v20.4s, v29.4s, v3.s[1] \n"
"fmla v21.4s, v29.4s, v3.s[3] \n"
"fmla v22.4s, v29.4s, v4.s[1] \n"
"fmla v23.4s, v29.4s, v4.s[3] \n"
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"fmla v20.4s, v30.4s, v3.s[2] \n"
"fmla v21.4s, v30.4s, v4.s[0] \n"
"fmla v22.4s, v30.4s, v4.s[2] \n"
"fmla v23.4s, v30.4s, v5.s[0] \n"
"sub %8, %8, #784 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v24", "v25", "v26", "v27", "v28", "v29", "v30");
}
#endif // __aarch64__
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1] \n" // r0
"add %1, %1, #32 \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2] \n" // r1
"add %2, %2, #32 \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v24.4s, v4.s[2] \n"
"fmla v18.4s, v24.4s, v5.s[0] \n"
"fmla v19.4s, v24.4s, v5.s[2] \n"
"fmla v16.4s, v25.4s, v4.s[1] \n"
"fmla v17.4s, v25.4s, v4.s[3] \n"
"fmla v18.4s, v25.4s, v5.s[1] \n"
"fmla v19.4s, v25.4s, v5.s[3] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"fmla v17.4s, v26.4s, v5.s[0] \n"
"fmla v18.4s, v26.4s, v5.s[2] \n"
"fmla v19.4s, v26.4s, v6.s[0] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3] \n" // r2
"add %3, %3, #32 \n"
"fmla v16.4s, v27.4s, v4.s[3] \n"
"fmla v17.4s, v27.4s, v5.s[1] \n"
"fmla v18.4s, v27.4s, v5.s[3] \n"
"fmla v19.4s, v27.4s, v6.s[1] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"fmla v17.4s, v28.4s, v5.s[2] \n"
"fmla v18.4s, v28.4s, v6.s[0] \n"
"fmla v19.4s, v28.4s, v6.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v5.s[1] \n"
"fmla v17.4s, v29.4s, v5.s[3] \n"
"fmla v18.4s, v29.4s, v6.s[1] \n"
"fmla v19.4s, v29.4s, v6.s[3] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"fmla v17.4s, v30.4s, v6.s[0] \n"
"fmla v18.4s, v30.4s, v6.s[2] \n"
"fmla v19.4s, v30.4s, v7.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4] \n" // r3
"add %4, %4, #32 \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v24.4s, v4.s[2] \n"
"fmla v18.4s, v24.4s, v5.s[0] \n"
"fmla v19.4s, v24.4s, v5.s[2] \n"
"fmla v16.4s, v25.4s, v4.s[1] \n"
"fmla v17.4s, v25.4s, v4.s[3] \n"
"fmla v18.4s, v25.4s, v5.s[1] \n"
"fmla v19.4s, v25.4s, v5.s[3] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"fmla v17.4s, v26.4s, v5.s[0] \n"
"fmla v18.4s, v26.4s, v5.s[2] \n"
"fmla v19.4s, v26.4s, v6.s[0] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5] \n" // r4
"add %5, %5, #32 \n"
"fmla v16.4s, v27.4s, v4.s[3] \n"
"fmla v17.4s, v27.4s, v5.s[1] \n"
"fmla v18.4s, v27.4s, v5.s[3] \n"
"fmla v19.4s, v27.4s, v6.s[1] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"fmla v17.4s, v28.4s, v5.s[2] \n"
"fmla v18.4s, v28.4s, v6.s[0] \n"
"fmla v19.4s, v28.4s, v6.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v5.s[1] \n"
"fmla v17.4s, v29.4s, v5.s[3] \n"
"fmla v18.4s, v29.4s, v6.s[1] \n"
"fmla v19.4s, v29.4s, v6.s[3] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"fmla v17.4s, v30.4s, v6.s[0] \n"
"fmla v18.4s, v30.4s, v6.s[2] \n"
"fmla v19.4s, v30.4s, v7.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6] \n" // r5
"add %6, %6, #32 \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v24.4s, v4.s[2] \n"
"fmla v18.4s, v24.4s, v5.s[0] \n"
"fmla v19.4s, v24.4s, v5.s[2] \n"
"fmla v16.4s, v25.4s, v4.s[1] \n"
"fmla v17.4s, v25.4s, v4.s[3] \n"
"fmla v18.4s, v25.4s, v5.s[1] \n"
"fmla v19.4s, v25.4s, v5.s[3] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"fmla v17.4s, v26.4s, v5.s[0] \n"
"fmla v18.4s, v26.4s, v5.s[2] \n"
"fmla v19.4s, v26.4s, v6.s[0] \n"
"prfm pldl1keep, [%7, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%7] \n" // r6
"add %7, %7, #32 \n"
"fmla v16.4s, v27.4s, v4.s[3] \n"
"fmla v17.4s, v27.4s, v5.s[1] \n"
"fmla v18.4s, v27.4s, v5.s[3] \n"
"fmla v19.4s, v27.4s, v6.s[1] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"fmla v17.4s, v28.4s, v5.s[2] \n"
"fmla v18.4s, v28.4s, v6.s[0] \n"
"fmla v19.4s, v28.4s, v6.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v5.s[1] \n"
"fmla v17.4s, v29.4s, v5.s[3] \n"
"fmla v18.4s, v29.4s, v6.s[1] \n"
"fmla v19.4s, v29.4s, v6.s[3] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"fmla v17.4s, v30.4s, v6.s[0] \n"
"fmla v18.4s, v30.4s, v6.s[2] \n"
"fmla v19.4s, v30.4s, v7.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"sub %8, %8, #784 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v24", "v25", "v26", "v27", "v28", "v29", "v30");
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1]! \n" // r0
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%1, #192] \n"
"vld1.f32 {d4-d6}, [%1] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2]! \n" // r1
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%2, #192] \n"
"vld1.f32 {d4-d6}, [%2] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3]! \n" // r2
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%3, #192] \n"
"vld1.f32 {d4-d6}, [%3] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"pld [%4, #256] \n"
"vld1.f32 {d0-d3}, [%4]! \n" // r3
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%4, #192] \n"
"vld1.f32 {d4-d6}, [%4] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5]! \n" // r4
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%5, #192] \n"
"vld1.f32 {d4-d6}, [%5] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"pld [%6, #256] \n"
"vld1.f32 {d0-d3}, [%6]! \n" // r5
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%6, #192] \n"
"vld1.f32 {d4-d6}, [%6] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"pld [%7, #256] \n"
"vld1.f32 {d0-d3}, [%7]! \n" // r6
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%7, #192] \n"
"vld1.f32 {d4-d6}, [%7] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"sub %8, %8, #784 \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v16.4s, v17.4s}, [%0] \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%1] \n" // r0
"add %1, %1, #16 \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmul v18.4s, v24.4s, v0.s[0] \n"
"fmul v19.4s, v24.4s, v0.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v26.4s, v0.s[2] \n"
"fmla v19.4s, v26.4s, v1.s[0] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%2] \n" // r1
"add %2, %2, #16 \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v28.4s, v1.s[0] \n"
"fmla v19.4s, v28.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v30.4s, v1.s[2] \n"
"fmla v19.4s, v30.4s, v2.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v24.4s, v4.s[2] \n"
"fmla v18.4s, v25.4s, v4.s[1] \n"
"fmla v19.4s, v25.4s, v4.s[3] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"fmla v17.4s, v26.4s, v5.s[0] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%3] \n" // r2
"add %3, %3, #16 \n"
"fmla v18.4s, v27.4s, v4.s[3] \n"
"fmla v19.4s, v27.4s, v5.s[1] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"fmla v17.4s, v28.4s, v5.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v18.4s, v29.4s, v5.s[1] \n"
"fmla v19.4s, v29.4s, v5.s[3] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"fmla v17.4s, v30.4s, v6.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v18.4s, v24.4s, v0.s[0] \n"
"fmla v19.4s, v24.4s, v0.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v26.4s, v0.s[2] \n"
"fmla v19.4s, v26.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%4] \n" // r3
"add %4, %4, #16 \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v28.4s, v1.s[0] \n"
"fmla v19.4s, v28.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v30.4s, v1.s[2] \n"
"fmla v19.4s, v30.4s, v2.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v24.4s, v4.s[2] \n"
"fmla v18.4s, v25.4s, v4.s[1] \n"
"fmla v19.4s, v25.4s, v4.s[3] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"fmla v17.4s, v26.4s, v5.s[0] \n"
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%5] \n" // r4
"add %5, %5, #16 \n"
"fmla v18.4s, v27.4s, v4.s[3] \n"
"fmla v19.4s, v27.4s, v5.s[1] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"fmla v17.4s, v28.4s, v5.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v18.4s, v29.4s, v5.s[1] \n"
"fmla v19.4s, v29.4s, v5.s[3] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"fmla v17.4s, v30.4s, v6.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v18.4s, v24.4s, v0.s[0] \n"
"fmla v19.4s, v24.4s, v0.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v26.4s, v0.s[2] \n"
"fmla v19.4s, v26.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%6] \n" // r5
"add %6, %6, #16 \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v28.4s, v1.s[0] \n"
"fmla v19.4s, v28.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v30.4s, v1.s[2] \n"
"fmla v19.4s, v30.4s, v2.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v24.4s, v4.s[2] \n"
"fmla v18.4s, v25.4s, v4.s[1] \n"
"fmla v19.4s, v25.4s, v4.s[3] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"fmla v17.4s, v26.4s, v5.s[0] \n"
"prfm pldl1keep, [%7, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%7] \n" // r6
"add %7, %7, #16 \n"
"fmla v18.4s, v27.4s, v4.s[3] \n"
"fmla v19.4s, v27.4s, v5.s[1] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"fmla v17.4s, v28.4s, v5.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v18.4s, v29.4s, v5.s[1] \n"
"fmla v19.4s, v29.4s, v5.s[3] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"fmla v17.4s, v30.4s, v6.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v18.4s, v24.4s, v0.s[0] \n"
"fmla v19.4s, v24.4s, v0.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v26.4s, v0.s[2] \n"
"fmla v19.4s, v26.4s, v1.s[0] \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v28.4s, v1.s[0] \n"
"fmla v19.4s, v28.4s, v1.s[2] \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v30.4s, v1.s[2] \n"
"fmla v19.4s, v30.4s, v2.s[0] \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"sub %8, %8, #784 \n"
"st1 {v16.4s, v17.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "v0", "v1", "v2", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v24", "v25", "v26", "v27", "v28", "v29", "v30");
#else // __aarch64__
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d28-d31}, [%0 :128] \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1]! \n" // r0
"vld1.f32 {d8[0]}, [%1] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmul.f32 q12, q5, d0[0] \n"
"vmul.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q6, d0[1] \n"
"vmla.f32 q15, q6, d1[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2]! \n" // r1
"vld1.f32 {d9[0]}, [%2] \n"
"vmla.f32 q14, q8, d1[1] \n"
"vmla.f32 q15, q8, d2[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q14, q10, d2[1] \n"
"vmla.f32 q15, q10, d3[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d8[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q14, q5, d4[0] \n"
"vmla.f32 q15, q5, d5[0] \n"
"vmla.f32 q12, q6, d4[1] \n"
"vmla.f32 q13, q6, d5[1] \n"
"vmla.f32 q14, q7, d5[0] \n"
"vmla.f32 q15, q7, d6[0] \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3]! \n" // r2
"vld1.f32 {d8[0]}, [%3] \n"
"vmla.f32 q12, q8, d5[1] \n"
"vmla.f32 q13, q8, d6[1] \n"
"vmla.f32 q14, q9, d6[0] \n"
"vmla.f32 q15, q9, d7[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d6[1] \n"
"vmla.f32 q13, q10, d7[1] \n"
"vmla.f32 q14, q11, d7[0] \n"
"vmla.f32 q15, q11, d9[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q6, d0[1] \n"
"vmla.f32 q15, q6, d1[1] \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"pld [%4, #256] \n"
"vld1.f32 {d4-d7}, [%4]! \n" // r3
"vld1.f32 {d9[0]}, [%4] \n"
"vmla.f32 q14, q8, d1[1] \n"
"vmla.f32 q15, q8, d2[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q14, q10, d2[1] \n"
"vmla.f32 q15, q10, d3[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d8[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q14, q5, d4[0] \n"
"vmla.f32 q15, q5, d5[0] \n"
"vmla.f32 q12, q6, d4[1] \n"
"vmla.f32 q13, q6, d5[1] \n"
"vmla.f32 q14, q7, d5[0] \n"
"vmla.f32 q15, q7, d6[0] \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5]! \n" // r4
"vld1.f32 {d8[0]}, [%5] \n"
"vmla.f32 q12, q8, d5[1] \n"
"vmla.f32 q13, q8, d6[1] \n"
"vmla.f32 q14, q9, d6[0] \n"
"vmla.f32 q15, q9, d7[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d6[1] \n"
"vmla.f32 q13, q10, d7[1] \n"
"vmla.f32 q14, q11, d7[0] \n"
"vmla.f32 q15, q11, d9[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q6, d0[1] \n"
"vmla.f32 q15, q6, d1[1] \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"pld [%6, #256] \n"
"vld1.f32 {d4-d7}, [%6]! \n" // r5
"vld1.f32 {d9[0]}, [%6] \n"
"vmla.f32 q14, q8, d1[1] \n"
"vmla.f32 q15, q8, d2[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q14, q10, d2[1] \n"
"vmla.f32 q15, q10, d3[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d8[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q14, q5, d4[0] \n"
"vmla.f32 q15, q5, d5[0] \n"
"vmla.f32 q12, q6, d4[1] \n"
"vmla.f32 q13, q6, d5[1] \n"
"vmla.f32 q14, q7, d5[0] \n"
"vmla.f32 q15, q7, d6[0] \n"
"pld [%7, #256] \n"
"vld1.f32 {d0-d3}, [%7]! \n" // r6
"vld1.f32 {d8[0]}, [%7] \n"
"vmla.f32 q12, q8, d5[1] \n"
"vmla.f32 q13, q8, d6[1] \n"
"vmla.f32 q14, q9, d6[0] \n"
"vmla.f32 q15, q9, d7[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d6[1] \n"
"vmla.f32 q13, q10, d7[1] \n"
"vmla.f32 q14, q11, d7[0] \n"
"vmla.f32 q15, q11, d9[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q6, d0[1] \n"
"vmla.f32 q15, q6, d1[1] \n"
"sub %1, %1, #16 \n"
"sub %2, %2, #16 \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q8, d1[1] \n"
"vmla.f32 q15, q8, d2[1] \n"
"sub %8, %8, #784 \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q10, d2[1] \n"
"vmla.f32 q15, q10, d3[1] \n"
"sub %3, %3, #16 \n"
"sub %4, %4, #16 \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d8[0] \n"
"sub %5, %5, #16 \n"
"sub %6, %6, #16 \n"
"vadd.f32 q14, q14, q12 \n"
"vadd.f32 q15, q15, q13 \n"
"sub %7, %7, #16 \n"
"vst1.f32 {d28-d31}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1] \n" // r0
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmul v17.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmul v18.4s, v25.4s, v0.s[1] \n"
"fmul v19.4s, v26.4s, v0.s[2] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4s, v5.4s}, [%2] \n" // r1
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v28.4s, v1.s[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v18.4s, v29.4s, v1.s[1] \n"
"fmla v19.4s, v30.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v25.4s, v4.s[1] \n"
"fmla v18.4s, v26.4s, v4.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3] \n" // r2
"fmla v19.4s, v27.4s, v4.s[3] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v17.4s, v29.4s, v5.s[1] \n"
"fmla v18.4s, v30.4s, v5.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v19.4s, v24.4s, v0.s[0] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v26.4s, v0.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4] \n" // r3
"fmla v18.4s, v27.4s, v0.s[3] \n"
"fmla v19.4s, v28.4s, v1.s[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v30.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v18.4s, v24.4s, v4.s[0] \n"
"fmla v19.4s, v25.4s, v4.s[1] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4s, v1.4s}, [%5] \n" // r4
"fmla v17.4s, v27.4s, v4.s[3] \n"
"fmla v18.4s, v28.4s, v5.s[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v19.4s, v29.4s, v5.s[1] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v17.4s, v24.4s, v0.s[0] \n"
"fmla v18.4s, v25.4s, v0.s[1] \n"
"fmla v19.4s, v26.4s, v0.s[2] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4s, v5.4s}, [%6] \n" // r5
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v28.4s, v1.s[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v18.4s, v29.4s, v1.s[1] \n"
"fmla v19.4s, v30.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v25.4s, v4.s[1] \n"
"fmla v18.4s, v26.4s, v4.s[2] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v0.4s, v1.4s}, [%7] \n" // r6
"fmla v19.4s, v27.4s, v4.s[3] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v17.4s, v29.4s, v5.s[1] \n"
"fmla v18.4s, v30.4s, v5.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v19.4s, v24.4s, v0.s[0] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v26.4s, v0.s[2] \n"
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"fmla v18.4s, v27.4s, v0.s[3] \n"
"fmla v19.4s, v28.4s, v1.s[0] \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v30.4s, v1.s[2] \n"
"add %3, %3, #8 \n"
"add %4, %4, #8 \n"
"fadd v18.4s, v18.4s, v19.4s \n"
"add %5, %5, #8 \n"
"fadd v16.4s, v16.4s, v17.4s \n"
"add %6, %6, #8 \n"
"add %7, %7, #8 \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"sub %8, %8, #784 \n"
"st1 {v16.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "v0", "v1", "v4", "v5", "v16", "v17", "v18", "v19", "v24", "v25", "v26", "v27", "v28", "v29", "v30");
#else // __aarch64__
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d8-d9}, [%0 :128] \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1] \n" // r0
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmul.f32 q5, q8, d0[0] \n"
"vmul.f32 q6, q9, d0[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmul.f32 q7, q10, d1[0] \n"
"vmla.f32 q4, q11, d1[1] \n"
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2] \n" // r1
"vmla.f32 q5, q12, d2[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmla.f32 q6, q13, d2[1] \n"
"vmla.f32 q7, q14, d3[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmla.f32 q4, q8, d4[0] \n"
"vmla.f32 q5, q9, d4[1] \n"
"vmla.f32 q6, q10, d5[0] \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3] \n" // r2
"vmla.f32 q7, q11, d5[1] \n"
"vmla.f32 q4, q12, d6[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmla.f32 q5, q13, d6[1] \n"
"vmla.f32 q6, q14, d7[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmla.f32 q7, q8, d0[0] \n"
"vmla.f32 q4, q9, d0[1] \n"
"vmla.f32 q5, q10, d1[0] \n"
"pld [%4, #256] \n"
"vld1.f32 {d4-d7}, [%4] \n" // r3
"vmla.f32 q6, q11, d1[1] \n"
"vmla.f32 q7, q12, d2[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmla.f32 q4, q13, d2[1] \n"
"vmla.f32 q5, q14, d3[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmla.f32 q6, q8, d4[0] \n"
"vmla.f32 q7, q9, d4[1] \n"
"vmla.f32 q4, q10, d5[0] \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5] \n" // r4
"vmla.f32 q5, q11, d5[1] \n"
"vmla.f32 q6, q12, d6[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmla.f32 q7, q13, d6[1] \n"
"vmla.f32 q4, q14, d7[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmla.f32 q5, q8, d0[0] \n"
"vmla.f32 q6, q9, d0[1] \n"
"vmla.f32 q7, q10, d1[0] \n"
"pld [%6, #256] \n"
"vld1.f32 {d4-d7}, [%6] \n" // r5
"vmla.f32 q4, q11, d1[1] \n"
"vmla.f32 q5, q12, d2[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmla.f32 q6, q13, d2[1] \n"
"vmla.f32 q7, q14, d3[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmla.f32 q4, q8, d4[0] \n"
"vmla.f32 q5, q9, d4[1] \n"
"vmla.f32 q6, q10, d5[0] \n"
"pld [%7, #256] \n"
"vld1.f32 {d0-d3}, [%7] \n" // r6
"vmla.f32 q7, q11, d5[1] \n"
"vmla.f32 q4, q12, d6[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmla.f32 q5, q13, d6[1] \n"
"vmla.f32 q6, q14, d7[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmla.f32 q7, q8, d0[0] \n"
"vmla.f32 q4, q9, d0[1] \n"
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"vmla.f32 q5, q10, d1[0] \n"
"vmla.f32 q6, q11, d1[1] \n"
"sub %8, %8, #784 \n"
"vmla.f32 q7, q12, d2[0] \n"
"vmla.f32 q4, q13, d2[1] \n"
"vmla.f32 q5, q14, d3[0] \n"
"add %3, %3, #8 \n"
"add %4, %4, #8 \n"
"vadd.f32 q6, q6, q7 \n"
"add %5, %5, #8 \n"
"vadd.f32 q4, q4, q5 \n"
"add %6, %6, #8 \n"
"vadd.f32 q4, q4, q6 \n"
"add %7, %7, #8 \n"
"vst1.f32 {d8-d9}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14");
#endif // __aarch64__
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
}
}
}
}
|
GB_binop__isne_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__isne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__isne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_uint8)
// A*D function (colscale): GB (_AxD__isne_uint8)
// D*A function (rowscale): GB (_DxB__isne_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_uint8)
// C=scalar+B GB (_bind1st__isne_uint8)
// C=scalar+B' GB (_bind1st_tran__isne_uint8)
// C=A+scalar GB (_bind2nd__isne_uint8)
// C=A'+scalar GB (_bind2nd_tran__isne_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij != bij)
// Type and operator macros consumed by the template files #included below.
// ISNE is "is not equal": z = (x != y), with the boolean result stored as a
// uint8_t (0 or 1), since C, A, and B all have type uint8_t here.
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
// (GBX also handles iso-valued matrices: presumably it reads Ax [0] when
// A_iso is true instead of Ax [pA] — confirm against the GBX definition)
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// access the p-th entry of the C->x value array (Cx is set by each function)
#define GB_CX(p) Cx [p]
// binary operator: note the row/column indices (i,j) are ignored by ISNE;
// they are part of the macro signature for positional ops (e.g. FIRSTI)
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* flags are compile-time opt-outs defined in GB_control.h)
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_UINT8 || GxB_NO_ISNE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// This variant is intentionally compiled out for ISNE: the dense
// C += A+B kernel only exists for the ops listed below, and the generator
// names it "(none)" for all other operators.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  Computes
// Cx [p] = (Ax [p] != Bx [p]) for every entry via the included template.
// Returns GrB_NO_VALUE when this op/type pairing is disabled at compile time
// (GB_DISABLE); the caller then falls back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__isne_uint8)
(
    GrB_Matrix C,           // output, dense
    const GrB_Matrix A,     // first input, dense
    const GrB_Matrix B,     // second input, dense
    const int nthreads      // number of threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, applying the
// ISNE operator as the accumulator (Cx [p] = (Cx [p] != bij)).
// Returns GrB_NO_VALUE if this kernel is disabled (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__isne_uint8)
(
    GrB_Matrix C,               // dense matrix accumulated into, in place
    const GrB_Matrix B,         // sparse matrix folded into C
    // parallel slicing of B's entries/vectors (see GB_ek_slice.h):
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
    #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C, applying the ISNE
// operator as the accumulator (Cx [p] = (Cx [p] != bwork)).
// Returns GrB_NO_VALUE if this kernel is disabled (GB_DISABLE).
//
// Fix: the original generated code had a second, unreachable
// "return (GrB_SUCCESS) ;" after the inner block, which already returns.
// The dead statement is removed here (NOTE: this file is auto-generated;
// the same fix belongs in the Generator/ template as well).
GrB_Info GB (_Cdense_accumb__isne_uint8)
(
    GrB_Matrix C,               // dense matrix accumulated into, in place
    const GB_void *p_bwork,     // points to the scalar b, of type uint8_t
    const int nthreads          // number of threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying ISNE as
// the multiply op.  Results are written directly into C->x via the Cx alias;
// presumably the pattern of C matches A (set up by the caller) — confirm
// against GB_AxB_colscale.
GrB_Info GB (_AxD__isne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,  // if true, values of A are ignored
    const GrB_Matrix D, bool D_is_pattern,  // if true, values of D are ignored
    // parallel slicing of A (see GB_ek_slice.h):
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // alias C->x with the operator's output type; used by GB_CX in the template
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying ISNE as the
// multiply op.  Mirror image of GB (_AxD__isne_uint8) above; results are
// written directly into C->x via the Cx alias.
GrB_Info GB (_DxB__isne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,  // if true, values of D are ignored
    const GrB_Matrix B, bool B_is_pattern,  // if true, values of B are ignored
    int nthreads                            // number of threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // alias C->x with the operator's output type; used by GB_CX in the template
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where the pattern of C is the set union of
// the patterns of A and B, and cij = (aij != bij) where both are present.
// Returns GrB_NO_VALUE if this kernel is disabled (GB_DISABLE).
GrB_Info GB (_AaddB__isne_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,           // sparsity format chosen for C
    const GrB_Matrix M,             // optional mask (may be NULL)
    const bool Mask_struct,         // presumably: use only M's structure,
                                    // ignoring its values — confirm vs GB_add
    const bool Mask_comp,           // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,            // presumably: C->h is shared with M->h
    const int64_t *restrict C_to_M, // task mappings from C's vectors to M/A/B
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,  // parallel task schedule
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isne_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the whole kernel, including workspace handling, lives in the meta template
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_FLIPPED selects the argument order inside the template; the template is
// included once per setting so each instantiation is compiled separately.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isne_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// kernel body supplied by the template, parallelized over the M slicing
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// kernel body supplied by the bitmap template
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isne_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = (x != Bx [k]) for every entry present per the bitmap Bb.
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip entries not present in B
        if (!GBB (Bb, k)) continue ;
        uint8_t b_k = GBX (Bx, k, false) ;
        Cx [k] = (x != b_k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isne_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = (Ax [k] != y) for every entry present per the bitmap Ab.
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in A
        if (!GBB (Ab, k)) continue ;
        uint8_t a_k = GBX (Ax, k, false) ;
        Cx [k] = (a_k != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bound first argument of the operator
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-define GB_ATYPE (same value) for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bound second argument of the operator
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ztrtri.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_trtri
*
* Computes the inverse of an upper or lower triangular matrix A.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] diag
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, the triangular matrix A. If uplo = 'U', the
* leading n-by-n upper triangular part of the array A
* contains the upper triangular matrix, and the strictly
* lower triangular part of A is not referenced. If uplo =
* 'L', the leading n-by-n lower triangular part of the array
* A contains the lower triangular matrix, and the strictly
* upper triangular part of A is not referenced. If diag =
* 'U', the diagonal elements of A are also not referenced and
* are assumed to be 1. On exit, the (triangular) inverse of
* the original matrix, in the same storage format.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit.
* @retval < 0 if -i, the i-th argument had an illegal value.
* @retval > 0 if i, A(i,i) is exactly zero. The triangular
* matrix is singular and its inverse can not be computed.
*
*******************************************************************************
*
* @sa plasma_ctrtri
* @sa plasma_dtrtri
* @sa plasma_strtri
*
******************************************************************************/
int plasma_ztrtri(plasma_enum_t uplo, plasma_enum_t diag,
                  int n, plasma_complex64_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    // quick return (n < 0 was already rejected above, so a plain test suffices)
    if (n == 0)
        return PlasmaSuccess;
    // Set tiling parameters.
    int nb = plasma->nb;
    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // Fixed: the tile descriptor was leaked on this error path.
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;
    // Asynchronous block.
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        // Call the tile async function.
        plasma_omp_ztrtri(uplo, diag, A, sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, sequence, &request);
    }
    // Implicit synchronization at the end of the parallel region.
    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);
    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_trtri
*
* Computes the inverse of a complex upper or lower triangular matrix A.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] diag
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] A
* On entry, the triangular matrix A. If uplo = 'U', the
* leading n-by-n upper triangular part of the array A
* contains the upper triangular matrix, and the strictly
* lower triangular part of A is not referenced. If uplo =
* 'L', the leading n-by-n lower triangular part of the array
* A contains the lower triangular matrix, and the strictly
* upper triangular part of A is not referenced. If diag =
* 'U', the diagonal elements of A are also not referenced and
* are assumed to be 1. On exit, the (triangular) inverse of
* the original matrix, in the same storage format.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_ztrtri
* @sa plasma_omp_ztrtri
* @sa plasma_omp_ctrtri
* @sa plasma_omp_dtrtri
* @sa plasma_omp_strtri
*
******************************************************************************/
void plasma_omp_ztrtri(plasma_enum_t uplo, plasma_enum_t diag,
                       plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate sequence and request first: every other error path below
    // reports through plasma_request_fail(sequence, request, ...), so the
    // original code passed NULL pointers into it before these checks ran.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((diag != PlasmaUnit) &&
        (diag != PlasmaNonUnit)) {
        plasma_error("illegal value of diag");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Quick return
    if (A.n == 0)
        return;
    // Call the parallel function.
    plasma_pztrtri(uplo, diag, A, sequence, request);
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* result = x - y, normalized so result->tv_usec ends up non-negative.
     * NOTE: y is adjusted in place during normalization (same contract as
     * the classic GNU libc example).  Returns 1 if the difference is
     * negative, 0 otherwise. */
    if (x->tv_usec < y->tv_usec) {
        /* borrow whole seconds from y so its usec drops below x's */
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        /* carry excess microseconds into y's seconds field */
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 8;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(32*t2-Nz-4,8)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(16*t1+Ny+29,8)),floord(32*t2+Ny+28,8)),floord(32*t1-32*t2+Nz+Ny+27,8));t3++) {
for (t4=max(max(max(0,ceild(t1-3,4)),ceild(32*t2-Nz-60,64)),ceild(8*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(16*t1+Nx+29,64)),floord(32*t2+Nx+28,64)),floord(8*t3+Nx+4,64)),floord(32*t1-32*t2+Nz+Nx+27,64));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),8*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),8*t3+6),64*t4+62),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(64*t4,t5+1);
ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
matrix_completion.c | #include <stdio.h>
#include <omp.h>
void run(double u[], double v[],int idx_i[], int idx_j[], double result[], int n,double w) {
    /* result[i] = w * u[idx_i[i]-1] * v[idx_j[i]-1]; index arrays are
     * 1-based (MATLAB/R style).  Iterations are independent. */
    #pragma omp parallel for
    for (long pos = 0; pos < n; pos++) {
        const double u_val = u[idx_i[pos] - 1];
        const double v_val = v[idx_j[pos] - 1];
        result[pos] = w * u_val * v_val;
    }
}
void phi(double** u, double** v,long* idx_i, long* idx_j, double* result, double* w, int n, int r) {
    /* result[i] = sum_{j<r} w[j] * u[idx_i[i]-1][j] * v[idx_j[i]-1][j]
     * (weighted inner product of the selected rows); indices are 1-based.
     *
     * Fixes vs. the original:
     *  - the accumulator was named `r`, shadowing the rank parameter, so the
     *    inner loop bound `j < r` compared against the running float sum;
     *  - the loop counter `j` was declared outside the OpenMP region and
     *    therefore shared across threads (data race);
     *  - the accumulator was a float, silently losing double precision. */
    #pragma omp parallel for
    for(long i = 0; i < n; i++) {
        double* u_i = u[idx_i[i]-1];
        double* v_i = v[idx_j[i]-1];
        double acc = 0.0;
        for (int j = 0; j < r; j++){
            acc += w[j]*u_i[j]*v_i[j];
        }
        result[i] = acc;
    }
}
double f_only(double** u, double** v,long* idx_i, long* idx_j, double* w, double*y, int n, int r) {
    /* Objective only: 0.5 * sum_i (pred_i - y[i])^2 where
     * pred_i = sum_{j<r} w[j] * u[idx_i[i]-1][j] * v[idx_j[i]-1][j].
     * Index arrays are 1-based. */
    double sum_sq = 0.0;
    #pragma omp parallel for reduction (+ : sum_sq)
    for (long i = 0; i < n; i++) {
        const double *row_u = u[idx_i[i] - 1];
        const double *row_v = v[idx_j[i] - 1];
        double pred = 0.0;
        for (int c = 0; c < r; c++) {
            pred += w[c] * row_u[c] * row_v[c];
        }
        const double diff = pred - y[i];
        sum_sq += diff * diff;
    }
    return 0.5 * sum_sq;
}
double fg(double** up, double** vp, double** u, double** v,long* idx_i, long* idx_j, double* w,double*y, int n, int r) {
    /* Objective AND gradient accumulation:
     * returns 0.5 * sum_i (pred_i - y[i])^2 and adds the per-observation
     * gradient contributions into up (w.r.t. u rows) and vp (w.r.t. v rows).
     * Index arrays are 1-based.
     *
     * Fix vs. the original: the loop counter `j` was declared outside the
     * OpenMP parallel-for and therefore shared by all threads -- a data
     * race.  It is now private to each iteration.
     *
     * NOTE(review): when two observations share an idx_i (or idx_j) value,
     * the `up`/`vp` updates below can still race across threads -- original
     * behavior kept; confirm whether atomic updates are required. */
    double s = 0.0;
    #pragma omp parallel for reduction (+ : s)
    for(long i = 0; i < n; i++) {
        double* u_i = u[idx_i[i]-1];
        double* v_i = v[idx_j[i]-1];
        double result = 0.0;
        for (int j = 0; j < r; j++){
            result += w[j]*u_i[j]*v_i[j];
        }
        result = result - y[i];
        s += result*result;
        for (int j = 0; j < r; j++){
            up[idx_i[i]-1][j] += result*v_i[j]*w[j];
            vp[idx_j[i]-1][j] += result*u_i[j]*w[j];
        }
    }
    return 0.5*s;
}
|
matrix_comp.h | #ifndef MATRIX_COMP_H_
#define MATRIX_COMP_H_
namespace acspo {

// Element-wise comparison operators for matrix<T>.
//
// Matrix-vs-matrix forms require equal dimensions and throw
// std::runtime_error("dimension mismatch") otherwise.  All forms return a
// matrix<bool> of the same shape; the element loops are parallelized with
// OpenMP.
//
// Fix vs. the original: in every matrix-vs-matrix operator the second
// operand's data pointer was declared `const T *` although mat2 is a
// matrix<S> -- ill-formed (or silently wrong) whenever S != T.  It is now
// `const S *`.

template <typename T, typename S>
matrix<bool> operator==(const matrix<T> &mat1, const matrix<S> &mat2)
{
    if (mat1.size() != mat2.size()) {
        throw std::runtime_error("dimension mismatch");
    }
    unsigned int rows = mat1.rows(), cols = mat1.cols(), elem = mat1.elem();
    matrix<bool> ret(rows, cols);
    bool *rptr = ret.ptr();
    const T *m1ptr = mat1.ptr();
    const S *m2ptr = mat2.ptr();    // fixed: was const T*
    #pragma omp parallel for
    for (unsigned int i = 0; i < elem; i++) {
        rptr[i] = m1ptr[i] == m2ptr[i];
    }
    return ret;
}

template <typename T, typename S>
matrix<bool> operator!=(const matrix<T> &mat1, const matrix<S> &mat2)
{
    if (mat1.size() != mat2.size()) {
        throw std::runtime_error("dimension mismatch");
    }
    unsigned int rows = mat1.rows(), cols = mat1.cols(), elem = mat1.elem();
    matrix<bool> ret(rows, cols);
    bool *rptr = ret.ptr();
    const T *m1ptr = mat1.ptr();
    const S *m2ptr = mat2.ptr();    // fixed: was const T*
    #pragma omp parallel for
    for (unsigned int i = 0; i < elem; i++) {
        rptr[i] = m1ptr[i] != m2ptr[i];
    }
    return ret;
}

template <typename T, typename S>
matrix<bool> operator<(const matrix<T> &mat1, const matrix<S> &mat2)
{
    if (mat1.size() != mat2.size()) {
        throw std::runtime_error("dimension mismatch");
    }
    unsigned int rows = mat1.rows(), cols = mat1.cols(), elem = mat1.elem();
    matrix<bool> ret(rows, cols);
    bool *rptr = ret.ptr();
    const T *m1ptr = mat1.ptr();
    const S *m2ptr = mat2.ptr();    // fixed: was const T*
    #pragma omp parallel for
    for (unsigned int i = 0; i < elem; i++) {
        rptr[i] = m1ptr[i] < m2ptr[i];
    }
    return ret;
}

template <typename T, typename S>
matrix<bool> operator<=(const matrix<T> &mat1, const matrix<S> &mat2)
{
    if (mat1.size() != mat2.size()) {
        throw std::runtime_error("dimension mismatch");
    }
    unsigned int rows = mat1.rows(), cols = mat1.cols(), elem = mat1.elem();
    matrix<bool> ret(rows, cols);
    bool *rptr = ret.ptr();
    const T *m1ptr = mat1.ptr();
    const S *m2ptr = mat2.ptr();    // fixed: was const T*
    #pragma omp parallel for
    for (unsigned int i = 0; i < elem; i++) {
        rptr[i] = m1ptr[i] <= m2ptr[i];
    }
    return ret;
}

template <typename T, typename S>
matrix<bool> operator>(const matrix<T> &mat1, const matrix<S> &mat2)
{
    if (mat1.size() != mat2.size()) {
        throw std::runtime_error("dimension mismatch");
    }
    unsigned int rows = mat1.rows(), cols = mat1.cols(), elem = mat1.elem();
    matrix<bool> ret(rows, cols);
    bool *rptr = ret.ptr();
    const T *m1ptr = mat1.ptr();
    const S *m2ptr = mat2.ptr();    // fixed: was const T*
    #pragma omp parallel for
    for (unsigned int i = 0; i < elem; i++) {
        rptr[i] = m1ptr[i] > m2ptr[i];
    }
    return ret;
}

template <typename T, typename S>
matrix<bool> operator>=(const matrix<T> &mat1, const matrix<S> &mat2)
{
    if (mat1.size() != mat2.size()) {
        throw std::runtime_error("dimension mismatch");
    }
    unsigned int rows = mat1.rows(), cols = mat1.cols(), elem = mat1.elem();
    matrix<bool> ret(rows, cols);
    bool *rptr = ret.ptr();
    const T *m1ptr = mat1.ptr();
    const S *m2ptr = mat2.ptr();    // fixed: was const T*
    #pragma omp parallel for
    for (unsigned int i = 0; i < elem; i++) {
        rptr[i] = m1ptr[i] >= m2ptr[i];
    }
    return ret;
}

// Matrix-vs-scalar forms: compare every element against the same value.

template <typename T, typename S>
matrix<bool> operator==(const matrix<T> &mat, const S &val)
{
    unsigned int rows = mat.rows(), cols = mat.cols(), elem = mat.elem();
    matrix<bool> ret(rows, cols);
    bool *rptr = ret.ptr();
    const T *mptr = mat.ptr();
    #pragma omp parallel for
    for (unsigned int i = 0; i < elem; i++) {
        rptr[i] = mptr[i] == val;
    }
    return ret;
}

template <typename T, typename S>
matrix<bool> operator!=(const matrix<T> &mat, const S &val)
{
    unsigned int rows = mat.rows(), cols = mat.cols(), elem = mat.elem();
    matrix<bool> ret(rows, cols);
    bool *rptr = ret.ptr();
    const T *mptr = mat.ptr();
    #pragma omp parallel for
    for (unsigned int i = 0; i < elem; i++) {
        rptr[i] = mptr[i] != val;
    }
    return ret;
}

template <typename T, typename S>
matrix<bool> operator<(const matrix<T> &mat, const S &val)
{
    unsigned int rows = mat.rows(), cols = mat.cols(), elem = mat.elem();
    matrix<bool> ret(rows, cols);
    bool *rptr = ret.ptr();
    const T *mptr = mat.ptr();
    #pragma omp parallel for
    for (unsigned int i = 0; i < elem; i++) {
        rptr[i] = mptr[i] < val;
    }
    return ret;
}

template <typename T, typename S>
matrix<bool> operator<=(const matrix<T> &mat, const S &val)
{
    unsigned int rows = mat.rows(), cols = mat.cols(), elem = mat.elem();
    matrix<bool> ret(rows, cols);
    bool *rptr = ret.ptr();
    const T *mptr = mat.ptr();
    #pragma omp parallel for
    for (unsigned int i = 0; i < elem; i++) {
        rptr[i] = mptr[i] <= val;
    }
    return ret;
}

template <typename T, typename S>
matrix<bool> operator>(const matrix<T> &mat, const S &val)
{
    unsigned int rows = mat.rows(), cols = mat.cols(), elem = mat.elem();
    matrix<bool> ret(rows, cols);
    bool *rptr = ret.ptr();
    const T *mptr = mat.ptr();
    #pragma omp parallel for
    for (unsigned int i = 0; i < elem; i++) {
        rptr[i] = mptr[i] > val;
    }
    return ret;
}

template <typename T, typename S>
matrix<bool> operator>=(const matrix<T> &mat, const S &val)
{
    unsigned int rows = mat.rows(), cols = mat.cols(), elem = mat.elem();
    matrix<bool> ret(rows, cols);
    bool *rptr = ret.ptr();
    const T *mptr = mat.ptr();
    #pragma omp parallel for
    for (unsigned int i = 0; i < elem; i++) {
        rptr[i] = mptr[i] >= val;
    }
    return ret;
}

// Scalar-on-the-left forms: forward to the mirrored matrix-vs-scalar form.

template <typename T, typename S>
matrix<bool> operator==(const S &val, const matrix<T> &mat)
{
    return mat == val;
}
template <typename T, typename S>
matrix<bool> operator!=(const S &val, const matrix<T> &mat)
{
    return mat != val;
}
template <typename T, typename S>
matrix<bool> operator<(const S &val, const matrix<T> &mat)
{
    return mat > val;
}
template <typename T, typename S>
matrix<bool> operator<=(const S &val, const matrix<T> &mat)
{
    return mat >= val;
}
template <typename T, typename S>
matrix<bool> operator>(const S &val, const matrix<T> &mat)
{
    return mat < val;
}
template <typename T, typename S>
matrix<bool> operator>=(const S &val, const matrix<T> &mat)
{
    return mat <= val;
}
}
#endif
|
private.c | #include "private.h"
static timer linsysTimer;
static pfloat totalSolveTime;
char * getLinSysMethod(Data * d, Priv * p) {
    /* Human-readable description of the linear-system solver; the caller
     * owns (and frees) the returned string.  A->p[n] is the nnz of A in
     * compressed-column form. */
    char * desc = scs_malloc(sizeof(char) * 64);
    long nnzA = (long) d->A->p[d->n];
    sprintf(desc, "sparse-direct, nnz in A = %li", nnzA);
    return desc;
}
char * getLinSysSummary(Priv * p, Info * info) {
    /* One-line factorization/solve summary; the caller owns the returned
     * string.  Buffer grown to 128 bytes and written with snprintf: the
     * original sprintf into 64 bytes could overflow for large nnz counts
     * (format text alone is ~50 chars before the %li and %1.2e fields). */
    char * str = scs_malloc(sizeof(char) * 128);
    idxint n = p->L->n;
    snprintf(str, 128, "\tLin-sys: nnz in L factor: %li, avg solve time: %1.2es\n",
             (long) (p->L->p[n] + n), totalSolveTime / (info->iter + 1) / 1e3);
    /* reset the running total so the next solve's average starts fresh */
    totalSolveTime = 0;
    return str;
}
void freePriv(Priv * p) {
    /* Release the factorization workspace.  Safe to call with NULL;
     * each member is freed only if it was allocated. */
    if (!p)
        return;
    if (p->L)
        cs_spfree(p->L);
    if (p->P)
        scs_free(p->P);
    if (p->D)
        scs_free(p->D);
    if (p->bp)
        scs_free(p->bp);
    scs_free(p);
}
cs * formKKT(Data * d) {
/* ONLY UPPER TRIANGULAR PART IS STUFFED
* forms column compressed KKT matrix
* assumes column compressed form A matrix
*
* forms upper triangular part of [I A'; A -I]
* (the top-left block is actually RHO_X * I -- see the first loop below)
*/
idxint j, k, kk;
cs * K_cs;
AMatrix * A = d->A;
/* I at top left */
const idxint Anz = A->p[d->n];
const idxint Knzmax = d->n + d->m + Anz;
/* last two args (1,1): allocate K in TRIPLET form, with values */
cs * K = cs_spalloc(d->m + d->n, d->m + d->n, Knzmax, 1, 1);
#ifdef EXTRAVERBOSE
scs_printf("forming KKT\n");
#endif
if (!K) {
return NULL;
}
/* kk counts triplet entries; in triplet form K->i/K->p are the row/col
 * index arrays and K->x the values */
kk = 0;
for (k = 0; k < d->n; k++) {
K->i[kk] = k;
K->p[kk] = k;
K->x[kk] = d->RHO_X;
kk++;
}
/* A^T at top right : CCS: */
/* column j of A becomes row j of the (n x m) top-right block; only the
 * upper triangle is stuffed, so rows are < n and columns are >= n */
for (j = 0; j < d->n; j++) {
for (k = A->p[j]; k < A->p[j + 1]; k++) {
K->p[kk] = A->i[k] + d->n;
K->i[kk] = j;
K->x[kk] = A->x[k];
kk++;
}
}
/* -I at bottom right */
for (k = 0; k < d->m; k++) {
K->i[kk] = k + d->n;
K->p[kk] = k + d->n;
K->x[kk] = -1;
kk++;
}
/* assert kk == Knzmax */
K->nz = Knzmax;
/* convert triplet -> compressed-column, then drop the triplet copy */
K_cs = cs_compress(K);
cs_spfree(K);
return (K_cs);
}
/* Compute a fill-reducing AMD ordering of A into P.
 * Allocates *info (AMD_INFO doubles) for AMD statistics; caller frees.
 * Returns the AMD status code (negative on failure). */
idxint LDLInit(cs * A, idxint P[], pfloat **info) {
*info = (pfloat *) scs_malloc(AMD_INFO * sizeof(pfloat));
#ifdef DLONG
/* long-integer build: use the amd_l_* variant to match idxint */
return(amd_l_order(A->n, A->p, A->i, P, (pfloat *) NULL, *info));
#else
return (amd_order(A->n, A->p, A->i, P, (pfloat *) NULL, *info));
#endif
}
idxint LDLFactor(cs * A, idxint P[], idxint Pinv[], cs **L, pfloat **D) {
    /* LDL' factorization of A (pattern via LDL_symbolic, values via
     * LDL_numeric).  On success returns n - (LDL_numeric result), i.e. 0
     * when the factorization completes; returns -1 on allocation failure.
     *
     * Fixes vs. the original:
     *  - allocations are now checked BEFORE LDL_symbolic uses them (the
     *    original dereferenced possibly-NULL workspace first);
     *  - the error path frees the workspace instead of leaking it. */
    idxint kk, n = A->n;
    idxint status = -1;
    idxint * Parent = scs_malloc(n * sizeof(idxint));
    idxint * Lnz = scs_malloc(n * sizeof(idxint));
    idxint * Flag = scs_malloc(n * sizeof(idxint));
    idxint * Pattern = scs_malloc(n * sizeof(idxint));
    pfloat * Y = scs_malloc(n * sizeof(pfloat));
    (*L)->p = (idxint *) scs_malloc((1 + n) * sizeof(idxint));
    *D = (pfloat *) scs_malloc(n * sizeof(pfloat));
    (*L)->x = NULL;
    (*L)->i = NULL;
    if (!Parent || !Lnz || !Flag || !Pattern || !Y || !(*L)->p || !(*D))
        goto cleanup;
    LDL_symbolic(n, A->p, A->i, (*L)->p, Parent, Lnz, Flag, P, Pinv);
    (*L)->nzmax = *((*L)->p + n);
    (*L)->x = (pfloat *) scs_malloc((*L)->nzmax * sizeof(pfloat));
    (*L)->i = (idxint *) scs_malloc((*L)->nzmax * sizeof(idxint));
    if (!(*L)->x || !(*L)->i)
        goto cleanup;
#ifdef EXTRAVERBOSE
    scs_printf("numeric factorization\n");
#endif
    kk = LDL_numeric(n, A->p, A->i, A->x, (*L)->p, Parent, Lnz, (*L)->i, (*L)->x, *D, Y, Pattern, Flag, P, Pinv);
#ifdef EXTRAVERBOSE
    scs_printf("finished numeric factorization\n");
#endif
    status = n - kk;
cleanup:
    /* guarded frees: scs_free is not assumed NULL-safe (freePriv guards too) */
    if (Parent) scs_free(Parent);
    if (Lnz) scs_free(Lnz);
    if (Flag) scs_free(Flag);
    if (Pattern) scs_free(Pattern);
    if (Y) scs_free(Y);
    return status;
}
/* Solves (P L D L' P') x = b.
 * When P is non-NULL, bp is used as length-n scratch for the permuted RHS;
 * when P is NULL the triangular solves run directly on x (copying b into x
 * first if they are distinct buffers). x may alias b. */
void LDLSolve(pfloat *x, pfloat b[], cs * L, pfloat D[], idxint P[], pfloat * bp) {
    idxint n = L->n;
    if (P) {
        /* permute in, solve L / D / L', permute back out */
        LDL_perm(n, bp, b, P);
        LDL_lsolve(n, bp, L->p, L->i, L->x);
        LDL_dsolve(n, bp, D);
        LDL_ltsolve(n, bp, L->p, L->i, L->x);
        LDL_permt(n, x, bp, P);
    } else {
        if (x != b) { /* only copy when the buffers differ */
            memcpy(x, b, n * sizeof(pfloat));
        }
        LDL_lsolve(n, x, L->p, L->i, L->x);
        LDL_dsolve(n, x, D);
        LDL_ltsolve(n, x, L->p, L->i, L->x);
    }
}
/* y += A' * x (accumulates into y: each yj starts from the existing y[j]).
 * A is in compressed sparse column (CSC) format with n columns, so column j
 * of A is row j of A'. Rows of A' are independent, hence the parallel for.
 * NOTE(review): the guard is OPENMP, not the compiler-defined _OPENMP --
 * presumably a build flag; confirm against the build system. */
void _accumByAtrans(idxint n, pfloat * Ax, idxint * Ai, idxint * Ap, const pfloat *x, pfloat *y) {
/* y = A'*x
A in column compressed format
parallelizes over columns (rows of A')
*/
idxint p, j;
idxint c1, c2;
pfloat yj;
#ifdef OPENMP
#pragma omp parallel for private(p,c1,c2,yj)
#endif
for (j = 0; j < n; j++) {
yj = y[j]; /* start from the existing value: accumulate, don't overwrite */
c1 = Ap[j]; /* column j occupies Ax[c1..c2) / Ai[c1..c2) */
c2 = Ap[j + 1];
for (p = c1; p < c2; p++) {
yj += Ax[p] * x[Ai[p]];
}
y[j] = yj;
}
}
/* y += A * x (accumulates into y).
 * A is in compressed sparse column (CSC) format with n columns. Runs
 * serially: parallelizing over columns would need atomic updates to y
 * (several columns can hit the same row), as the original author noted
 * in the commented-out pragmas. */
void _accumByA(idxint n, pfloat * Ax, idxint * Ai, idxint * Ap, const pfloat *x, pfloat *y) {
    idxint idx, col;
    idxint start, end;
    pfloat xval;
    for (col = 0; col < n; col++) {
        xval = x[col];
        start = Ap[col];
        end = Ap[col + 1];
        for (idx = start; idx < end; idx++) {
            y[Ai[idx]] += Ax[idx] * xval;
        }
    }
}
/* y += A' * x for the problem data matrix d->A (CSC).
 * The Priv pointer p is unused; kept for the uniform linear-system API. */
void accumByAtrans(Data * d, Priv * p, const pfloat *x, pfloat *y) {
AMatrix * A = d->A;
_accumByAtrans(d->n, A->x, A->i, A->p, x, y);
}
/* y += A * x for the problem data matrix d->A (CSC, n columns).
 * The Priv pointer p is unused; kept for the uniform linear-system API. */
void accumByA(Data * d, Priv * p, const pfloat *x, pfloat *y) {
AMatrix * A = d->A;
_accumByA(d->n, A->x, A->i, A->p, x, y);
}
/* Builds the KKT matrix, computes an AMD ordering, symmetrically permutes,
 * and LDL-factorizes it into p->L / p->D.
 * Returns the LDL status (0 = success), the negative AMD status on
 * ordering failure, or -1 if the KKT matrix could not be formed.
 * Fix over the original: K and info are now freed when AMD fails
 * (previously both leaked on that early return). */
idxint factorize(Data * d, Priv * p) {
    pfloat *info;
    idxint *Pinv, amd_status, ldl_status;
    cs *C, *K = formKKT(d);
    if (!K) {
        return -1;
    }
    amd_status = LDLInit(K, p->P, &info);
    if (amd_status < 0) {
        /* FIX: release K and the AMD info buffer on failure */
        cs_spfree(K);
        scs_free(info);
        return (amd_status);
    }
#ifdef EXTRAVERBOSE
    if(d->VERBOSE) {
        scs_printf("Matrix factorization info:\n");
#ifdef DLONG
        amd_l_info(info);
#else
        amd_info(info);
#endif
    }
#endif
    /* apply the fill-reducing permutation, then factorize */
    Pinv = cs_pinv(p->P, d->n + d->m);
    C = cs_symperm(K, Pinv, 1);
    ldl_status = LDLFactor(C, NULL, NULL, &p->L, &p->D);
    cs_spfree(C);
    cs_spfree(K);
    scs_free(Pinv);
    scs_free(info);
    return (ldl_status);
}
/* Allocates the private work structure (permutation, factor skeleton,
 * scratch vector) and performs the KKT factorization.
 * Returns NULL on allocation or factorization failure.
 * Fix over the original: allocation results are now checked before being
 * dereferenced (p->L->m etc. would have crashed on a failed scs_malloc). */
Priv * initPriv(Data * d) {
    Priv * p = scs_calloc(1, sizeof(Priv));
    idxint n_plus_m = d->n + d->m;
    if (!p) {
        return NULL;
    }
    p->P = scs_malloc(sizeof(idxint) * n_plus_m);
    p->L = scs_malloc(sizeof(cs));
    p->bp = scs_malloc(n_plus_m * sizeof(pfloat));
    if (!p->P || !p->L || !p->bp) {
        /* calloc zeroed the struct, so freePriv sees NULL for anything
         * not yet allocated -- assumed NULL-safe, TODO confirm */
        freePriv(p);
        return NULL;
    }
    p->L->m = n_plus_m;
    p->L->n = n_plus_m;
    p->L->nz = -1; /* triplet-form flag until cs_compress/factorization */
    if (factorize(d, p) < 0) {
        freePriv(p);
        return NULL;
    }
    totalSolveTime = 0.0; /* global timing accumulator */
    return p;
}
/* Solves the pre-factorized KKT system in place: on entry b is the RHS, on
 * exit it holds the solution. `s` and `iter` are unused here (kept for the
 * generic linear-system interface). Always returns 0.
 * Side effects: updates the global totalSolveTime via linsysTimer. */
idxint solveLinSys(Data * d, Priv * p, pfloat * b, const pfloat * s, idxint iter) {
/* returns solution to linear system */
/* Ax = b with solution stored in b */
tic(&linsysTimer);
LDLSolve(b, b, p->L, p->D, p->P, p->bp);
totalSolveTime += tocq(&linsysTimer);
#ifdef EXTRAVERBOSE
scs_printf("linsys solve time: %1.2es\n", tocq(&linsysTimer) / 1e3);
#endif
return 0;
}
|
OpenMP_using_Reduction.c | // gcc -fopenmp -lm -D_DEFAULT_SOURCE -o OpenMP_using_Reduction OpenMP_using_Reduction.c && ./OpenMP_using_Reduction
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include <sys/time.h>
#define MAX_TERMS 2e09
#define NUMTHREADS 8
/* Approximates pi with MAX_TERMS terms of the Leibniz series
 * pi = 4 * (1 - 1/3 + 1/5 - 1/7 + ...), parallelized with an OpenMP
 * reduction, and prints the elapsed wall-clock time and the result. */
int main() {
int thread_count = NUMTHREADS;
double result = 0;
struct timeval tval_before, tval_after, tval_result;
gettimeofday(&tval_before, NULL);
/* each thread accumulates a private partial sum; reduction(+) combines them */
#pragma omp parallel for num_threads(thread_count) reduction(+:result)
for(int i = 0; i < MAX_TERMS; ++i) {
/* term i: sign alternates with the parity of i, denominator is 2i+1 */
result += 4.0 * (i % 2 == 0 ? 1: -1) / (2.0 * i + 1);
}
gettimeofday(&tval_after, NULL);
timersub(&tval_after, &tval_before, &tval_result);
printf("Time elapsed: %ld.%06ld\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);
printf("Result: %.16lf\n", result);
return 0;
}
saxpy.c | /**
* @file saxpy.c
*
* @brief saxpy performs the \c axpy computation in single-precision on both
* host and accelerator. The performance (in MFLOPS) on host and accelerator is
* compared and the numerical results are also verified for consistency.
*
* The \c axpy computation is defined as:
*
* y := a * x + y
*
* where:
*
* - a is a scalar.
* - x and y are vectors each with n elements.
*
* Please note that in this version only <em>one GPU thread</em> is used.
*
* Offload to GPU:
*
* gcc -fopenmp -foffload=nvptx-none saxpy.c -o saxpy
* clang -fopenmp -O3 -fopenmp-targets=nvptx64-nvidia-cuda saxpy.c -o saxpy
SOURCE: https://github.com/pc2/OMP-Offloading/blob/master/simplifiedCode/05_saxpy/saxpy.c
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#define TWO02 (1 << 2)
#define TWO04 (1 << 4)
#define TWO08 (1 << 8)
#define TWO27 (1 << 27)
/**
 * Entry point: runs y := a*x + y on the host (OpenMP for simd) and
 * z := a*x + z in an OpenMP target region, times both, then verifies
 * the two results are bit-identical.
 * Fix over the original: x, y and z are now freed on the success path
 * (previously they were only freed on the allocation-failure path).
 */
int main(int argc, char *argv[])
{
    int i, n = TWO27,
        iret = 0;
    float a = 101.0f / TWO02,
        *x, *y, *z;
    struct timespec rt[2];
    double wt; // walltime
    /*
     * 0. prepare x, y, and z
     *
     * y := a * x + y (on host)
     * z := a * x + z (on accel)
     */
    if (NULL == (x = (float *) malloc(sizeof(*x) * n))) {
        printf("error: memory allocation for 'x'\n");
        iret = -1;
    }
    if (NULL == (y = (float *) malloc(sizeof(*y) * n))) {
        printf("error: memory allocation for 'y'\n");
        iret = -1;
    }
    if (NULL == (z = (float *) malloc(sizeof(*z) * n))) {
        printf("error: memory allocation for 'z'\n");
        iret = -1;
    }
    if (0 != iret) {
        /* free(NULL) is a no-op, so unconditional frees are safe here */
        free(x);
        free(y);
        free(z);
        exit(EXIT_FAILURE);
    }
    for (i = 0; i < n; i++) {
        x[i] = rand() % TWO04 / (float) TWO02;
        y[i] = z[i] = rand() % TWO08 / (float) TWO04;
    }
    /*
     * 1. saxpy on host
     */
    clock_gettime(CLOCK_REALTIME, rt + 0);
#pragma omp parallel \
    default(none) shared(n, a, x, y) private(i)
    {
#pragma omp for simd schedule(simd:static)
        for (i = 0; i < n; i++) {
            y[i] = a * x[i] + y[i];
        }
    }
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("saxpy on host : %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n / (1.0e6 * wt));
    /*
     * 2. saxpy on accel (see file header: only one GPU thread is used)
     */
    clock_gettime(CLOCK_REALTIME, rt + 0);
#pragma omp target device(0) \
    map(to:n, a, x[0:n]) map(tofrom:z[0:n]) private(i)
    {
        for (i = 0; i < n; i++) {
            z[i] = a * x[i] + z[i];
        }
    }
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("saxpy on accel: %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n / (1.0e6 * wt));
    /*
     * 3. verify numerical consistency: bit-exact compare through int views
     *    (note: assert compiles away under NDEBUG)
     */
    for (i = 0; i < n; i++) {
        iret = *(int *) (y + i) ^ *(int *) (z + i);
        assert(iret == 0);
    }
    /* FIX: release the buffers on the success path (previously leaked) */
    free(x);
    free(y);
    free(z);
    return 0;
}
|
GB_unop__identity_bool_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_fc64)
// op(A') function: GB (_unop_tran__identity_bool_fc64)
// C type: bool
// A type: GxB_FC64_t
// cast: bool cij = (creal (aij) != 0) || (cimag (aij) != 0)
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (creal (aij) != 0) || (cimag (aij) != 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (creal (aij) != 0) || (cimag (aij) != 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = op (cast (Ax)): cast each double-complex entry of A to bool
 * (true iff real or imaginary part is nonzero) and store it in Cx.
 * Auto-generated code (see file header: do not edit if in Generated2/). */
GrB_Info GB (_unop_apply__identity_bool_fc64)
(
bool *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
/* sparse/hypersparse/full case: every entry is present */
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
bool z = (creal (aij) != 0) || (cimag (aij) != 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; /* skip entries absent from the bitmap */
GxB_FC64_t aij = Ax [p] ;
bool z = (creal (aij) != 0) || (cimag (aij) != 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose, typecast to bool, and apply the identity
 * unary op. The actual loops live in the shared template
 * GB_unop_transpose.c, specialized via the GB_* macros defined above. */
GrB_Info GB (_unop_tran__identity_bool_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
nvptx_param_translate.c | // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s
// expected-no-diagnostics
// CHECK: [[MAP_FN:%.+]] = load void (i8*, ...)*, void (i8*, ...)** %
// CHECK: call void (i8*, ...) [[MAP_FN]](i8* %
// Compile-only FileCheck test (see RUN/CHECK lines above): the target
// region maps 'a' tofrom and 'b' to-only, then assigns b to a inside a
// task. Only the generated IR is checked -- 'b' is never initialized, so
// this is not meant to be executed.
int main() {
double a, b;
#pragma omp target map(tofrom \
: a) map(to \
: b)
{
#pragma omp taskgroup
#pragma omp task shared(a)
a = b;
}
return 0;
}
|
7.doacross2.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <omp.h> /* OpenMP */
#define M 4
/* Q1: In which order are the "Outside" and "Inside" messages printed? */
/* Q2: In which order are the iterations in the second loop nest executed? */
/* Demonstrates OpenMP doacross loops: ordered(1) declares cross-iteration
 * dependences on the i loop only. Loop 1 waits on iteration i-1
 * (depend(sink: i-1)) before computing a1, then posts completion
 * (depend(source)). Loop 2 sinks on the current i, which was presumably
 * already posted by loop 1 of the same iteration -- the Q1/Q2 questions in
 * the header ask the reader to work out the resulting ordering. */
int main()
{
float a1[M][M], b1[M][M];
omp_set_num_threads(4);
#pragma omp parallel
{
#pragma omp for ordered(1) schedule(dynamic)
for (int i=0; i<M; i++) {
for (int j=0; j<M; j++) {
#pragma omp ordered depend(sink: i-1)
{
printf("Computing loop 1 iteration %d %d\n", i, j);
a1[i][j] = 3.45;
}
#pragma omp ordered depend(source)
}
for (int j=2; j<M; j++) {
#pragma omp ordered depend(sink: i)
{
printf("Computing loop 2 iteration %d %d\n", i, j);
b1[i][j] = a1[i][j-1] * 0.978;
}
#pragma omp ordered depend(source)
}
}
}
return 0;
}
|
update_ops_named_projection.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
/* Zeroes the amplitudes where the target qubit is 1 (|0><0| projection).
 * With OpenMP available, large state vectors (dim >= 2^13) are dispatched
 * to the parallel kernel; small ones stay on the serial path, where the
 * parallel overhead would dominate. */
void P0_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
#ifdef _OPENMP
    UINT threshold = 13;
    if (dim >= (((ITYPE)1) << threshold)) {
        P0_gate_parallel(target_qubit_index, state, dim);
    } else {
        P0_gate_single(target_qubit_index, state, dim);
    }
#else
    P0_gate_single(target_qubit_index, state, dim);
#endif
}
/* Zeroes the amplitudes where the target qubit is 0 (|1><1| projection).
 * Same size-based serial/parallel dispatch as P0_gate. */
void P1_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
#ifdef _OPENMP
    UINT threshold = 13;
    if (dim >= (((ITYPE)1) << threshold)) {
        P1_gate_parallel(target_qubit_index, state, dim);
    } else {
        P1_gate_single(target_qubit_index, state, dim);
    }
#else
    P1_gate_single(target_qubit_index, state, dim);
#endif
}
/* Serial |0><0| projection: for each of the dim/2 basis pairs, rebuild the
 * index with the target bit forced to 1 and zero that amplitude. */
void P0_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE half_dim = dim / 2;
    const ITYPE target_mask = (1ULL << target_qubit_index);
    const ITYPE lower_bits = target_mask - 1;
    const ITYPE upper_bits = ~lower_bits;
    ITYPE i;
    for (i = 0; i < half_dim; ++i) {
        /* splice i around the target bit position, then set that bit */
        const ITYPE idx = (i & lower_bits) + ((i & upper_bits) << 1) + target_mask;
        state[idx] = 0;
    }
}
/* Serial |1><1| projection: same index splice as P0_gate_single but with
 * the target bit left at 0, zeroing the target-bit-0 amplitudes. */
void P1_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE half_dim = dim / 2;
    const ITYPE target_mask = (1ULL << target_qubit_index);
    const ITYPE lower_bits = target_mask - 1;
    const ITYPE upper_bits = ~lower_bits;
    ITYPE i;
    for (i = 0; i < half_dim; ++i) {
        const ITYPE idx = (i & lower_bits) + ((i & upper_bits) << 1);
        state[idx] = 0;
    }
}
#ifdef _OPENMP
/* OpenMP variant of P0_gate_single: zeroes the amplitudes whose target bit
 * is 1. Iterations touch disjoint indices, so no synchronization needed. */
void P0_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = (1ULL << target_qubit_index);
const ITYPE low_mask = mask - 1;
const ITYPE high_mask = ~low_mask;
ITYPE state_index;
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; ++state_index) {
/* splice state_index around the target bit, then set that bit (+mask) */
ITYPE temp_index = (state_index&low_mask) + ((state_index&high_mask) << 1) + mask;
state[temp_index] = 0;
}
}
/* OpenMP variant of P1_gate_single: zeroes the amplitudes whose target bit
 * is 0. Iterations touch disjoint indices, so no synchronization needed. */
void P1_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = (1ULL << target_qubit_index);
const ITYPE low_mask = mask - 1;
const ITYPE high_mask = ~low_mask;
ITYPE state_index;
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; ++state_index) {
/* splice state_index around the target bit, leaving that bit 0 */
ITYPE temp_index = (state_index&low_mask) + ((state_index&high_mask) << 1);
state[temp_index] = 0;
}
}
#endif
|
example-omp.c | // PWD009: Incorrect privatization in parallel region
// https://www.appentra.com/knowledge/checks/pwd009
// NOTE: this function INTENTIONALLY demonstrates defect PWD009 (see the
// header link): C is an output array shared by all iterations, but it is
// listed in private(), so each thread gets its own uninitialized copy of
// the pointer and the caller's C is never updated. The correct clause
// would be "private(temp)" only. Do not "fix" this -- it is the example.
void example(int m, double *A, double *B, double *C) {
double temp;
// "C" should be shared
#pragma omp parallel for private(temp, C)
for (int i = 0; i < m; i++) {
temp = A[i] * B[i];
C[i] = C[i] + temp;
}
}
|
max_subarray.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <math.h>
#include <time.h>
#include <limits.h>
/*
BAROUDI AYMEN
*/
struct tablo {
int * tab;
int size;
};
typedef enum {PREFIX, SUFFIX} T_SENS;
typedef enum {PLUS, MAX} OPERATEUR;
int max(int a, int b);
int apply(OPERATEUR op, int a, int b);
void initTabloNeutralElement(struct tablo* tableau, OPERATEUR op);
void generateArray(struct tablo * s);
void generateRandomArray(struct tablo * s, int size);
void printArray(struct tablo * tmp);
void montee(struct tablo * source, struct tablo * destination, OPERATEUR op, T_SENS sens);
void final(struct tablo * a, struct tablo * b, OPERATEUR op);
void descente(struct tablo * a, struct tablo * b, OPERATEUR op);
void sumPrefix(struct tablo * source, struct tablo * result);
void maxPrefix(struct tablo *source, struct tablo *result);
void sumSuffix(struct tablo *source, struct tablo *result);
void maxSuffix(struct tablo *source, struct tablo *result);
void maxSubArray(struct tablo * source, struct tablo * M);
struct tablo * maxSubSequence(struct tablo * source, struct tablo * indexes);
void writeResult(struct tablo * source, struct tablo * result, T_SENS sens);
struct tablo * findIndex(struct tablo * source);
/**
* Fonction: apply
* ---------------
* @param a un entier
* @param b un entier
* @param OP un operateur (enum OPERATEUR)
*
* Applique l'operateur op a a et b : a op b
*
* @return a OP b
*/
/* Applies the binary operator `op` to (a, b): a+b for PLUS, max(a,b) for
 * MAX (via fmax, result converted back to int). Returns -1 for an unknown
 * operator value. */
int apply(OPERATEUR op, int a, int b){
    switch (op) {
    case PLUS:
        return a + b;
    case MAX:
        return fmax(a,b);
    default:
        return -1;
    }
}
/* Debug helper: prints a header with the array size, then every element
 * separated by spaces, followed by a blank line. */
void printArray(struct tablo * tmp) {
    printf("-------- Array of size %i -------- \n", tmp -> size);
    for (int k = 0; k < tmp -> size; ++k) {
        printf("%i ", tmp -> tab[k]);
    }
    printf("\n\n");
}
/* Prints the elements space-separated on one line (final program output). */
void printResult(struct tablo * tmp) {
    for (int k = 0; k < tmp -> size; ++k) {
        printf("%i ", tmp -> tab[k]);
    }
    printf("\n");
}
/* Allocates a tablo of `size` zero-initialized ints (calloc).
 * NOTE(review): allocation results are not checked, consistent with the
 * rest of this program. */
struct tablo * allocateTablo(int size) {
    struct tablo *t = malloc(sizeof(struct tablo));
    t -> size = size;
    t -> tab = calloc(size, sizeof(int));
    return t;
}
/**
* Fonction: initTabloNeutralElement
* ---------------
* @param tableau un pointeur vers une struct tablo
* @param OP un operateur (enum OPERATEUR)
*
* Initialise le premier element de l'arbre avec l'element neutre de l'operateur op
*/
/* Seeds slot 1 (the scan-tree root) with the neutral element of `op`:
 * 0 for PLUS, INT_MIN for MAX (smallest int, neutral for max over ints). */
void initTabloNeutralElement(struct tablo * tableau, OPERATEUR op){
    if (op == MAX){
        tableau->tab[1] = INT_MIN;
    } else if (op == PLUS){
        tableau->tab[1] = 0;
    }
}
/**
* Fonction: max
* ---------------
* @param x un entier
* @param y un entier
* cf. geeksforgeeks
* @return max entre x et y
*/
/* Returns the larger of x and y. (The original used the branch-free
 * xor/mask trick from geeksforgeeks; a plain conditional is equivalent.) */
int max(int x, int y){
    return (x < y) ? y : x;
}
/**
* Fonction: montee
* ---------------
* @param source un pointeur vers une struct tablo representant le tableau source de la phase montee du scan/prefix
* @param destination un pointeur vers une struct tablo representant le tableau destination de la phase montee du scan/prefix
* On suppose que la taille de destination est 2 fois la taille du tableau source
* @param op un operateur (enum OPERATEUR)
* @param sens un sens de lecture, prefixe ou suffixe
*
* Effectue la phase montee du scan/prefix
*/
/* Up-sweep phase of the parallel scan.
 * Copies `source` (in order for PREFIX, reversed for SUFFIX) into the
 * leaves (upper half) of the binary tree `destination` (size 2n, assumed
 * pre-allocated), then combines pairs level by level up to the root at
 * index 1 using operator `op`. Assumes source->size is a power of two. */
void montee(struct tablo * source, struct tablo * destination, OPERATEUR op, T_SENS sens) {
/* up-sweep phase */
// destination is assumed to be allocated by the caller
// sens selects prefix vs suffix direction
int m = log2(source->size);
// seed the root with the operator's neutral element
initTabloNeutralElement(destination, op);
// fill the leaves for a prefix scan (in order)
if (sens == PREFIX){
#pragma omp parallel for num_threads(omp_get_num_procs()/2)
for (int i = 0; i < source->size; i++){
destination->tab[(i+source->size)] = source->tab[i];
}
}
// fill the leaves for a suffix scan (reversed)
else if (sens == SUFFIX){
#pragma omp parallel for num_threads(omp_get_num_procs()/2)
for (int i = 0; i < source->size; i++){
destination->tab[(i+source->size)] = source->tab[source->size - i - 1];
}
}
// up-sweep: each internal node combines its two children
// loop bound hoisted so the inner loop matches OpenMP's "canonical loop form"
int borneMax = 0;
for (int l = (m-1); l >= 0; l--){
borneMax = pow(2,(l+1));
#pragma omp parallel for num_threads(omp_get_num_procs()/2)
for (int j = pow(2,l); j < borneMax; j++){
destination->tab[j] = apply(op, destination->tab[2*j], destination->tab[(2*j)+1]);
}
}
}
/**
* Fonction: descente
* ---------------
* @param a un pointeur vers une struct tablo representant le tableau intermediaire apres la phase montee du scan/prefix
* @param b un pointeur vers une struct tablo representant le tableau destination de la phase descente du scan/prefix
* @param op un operateur (enum OPERATEUR)
* On suppose que la taille des deux tablo sont egaux
*
* Effectue la phase descente du scan/prefix
*/
/* Down-sweep phase of the parallel scan.
 * `a` is the up-sweep tree; `b` (same size, root pre-seeded with the
 * neutral element) receives, at each node, the combination of everything
 * strictly to its left: odd children combine the parent's value with their
 * left sibling from `a`; even children inherit the parent's value. */
void descente(struct tablo * a, struct tablo * b, OPERATEUR op) {
// down-sweep implementation
int m = log2((a->size)/2);
// loop bound hoisted to match OpenMP's "canonical loop form"
int borneMax = 0;
// walk the tree from the root down to the leaves
for (int l = 1; l <= m; l++){
borneMax = (pow(2,(l+1)))-1;
#pragma omp parallel for num_threads(omp_get_num_procs()/2)
for (int j = pow(2,l); j <= borneMax; j++){
if (j & 1){ // odd index: right child
b->tab[j] = apply(op, b->tab[(j-1)/2], a->tab[j-1]);
}
else{
b->tab[j] = b->tab[j/2];
}
}
}
}
/**
* Fonction: final
* ---------------
* @param a un pointeur vers une struct tablo representant le tableau intermediaire apres la phase descente du scan/prefix
* @param b un pointeur vers une struct tablo representant le tableau final de la phase final du scan/prefix
* Avec les resultat du scan/prefix dans la deuxieme moitiee du tablo b.
* @param op un operateur (enum OPERATEUR)
*
* Effectue la phase final du scan/prefix
*/
/* Final phase of the scan: combines each leaf of the down-sweep tree `b`
 * with the matching leaf of the up-sweep tree `a`, turning the exclusive
 * scan into an inclusive one. The answers end up in the upper half of b. */
void final(struct tablo * a, struct tablo * b, OPERATEUR op) {
// final combine phase
int m = log2((a->size)/2);
// loop bound hoisted to match OpenMP's "canonical loop form"
int borneMax = (pow(2,(m+1)))-1;
#pragma omp parallel for num_threads(omp_get_num_procs()/2)
for (int j = pow(2,m); j <= borneMax; j++){
b->tab[j] = apply(op, b->tab[j], a->tab[j]);
}
}
/**
* Fonction: sumPrefix
* ---------------
* @param source un pointeur vers une struct tablo representant le tableau dont il faut effectuer la somme prefix
* @param result un pointeur vers une struct tablo representant le tableau final resultat de la somme prefix
* @param op un operateur (enum OPERATEUR)
*
* Effectue la somme prefix du tablo source et ecrit le resultat dans le tablo result
*/
/* Parallel inclusive prefix sum of `source` into `result`.
 * Allocates the two 2n scan trees, runs up-sweep / down-sweep / final
 * combine with PLUS, extracts the prefix half, then frees the trees. */
void sumPrefix(struct tablo * source, struct tablo * result){
    struct tablo *up = allocateTablo(source->size * 2);
    struct tablo *down = allocateTablo(source->size * 2);
    montee(source, up, PLUS, PREFIX);
    initTabloNeutralElement(down, PLUS);
    descente(up, down, PLUS);
    final(up, down, PLUS);
    writeResult(down, result, PREFIX);
    free(up->tab);
    free(up);
    free(down->tab);
    free(down);
}
/**
* Fonction: sumSuffix
* ---------------
* @param source un pointeur vers une struct tablo representant le tableau dont il faut effectuer la somme suffix
* @param result un pointeur vers une struct tablo representant le tableau final resultat de la somme suffix
* @param op un operateur (enum OPERATEUR)
*
* Effectue la somme suffix du tablo source et ecrit le resultat dans le tablo result
*/
/* Parallel inclusive suffix sum of `source` into `result`.
 * Same scan pipeline as sumPrefix, run in the SUFFIX direction. */
void sumSuffix(struct tablo * source, struct tablo * result){
    struct tablo *up = allocateTablo(source->size * 2);
    struct tablo *down = allocateTablo(source->size * 2);
    montee(source, up, PLUS, SUFFIX);
    initTabloNeutralElement(down, PLUS);
    descente(up, down, PLUS);
    final(up, down, PLUS);
    writeResult(down, result, SUFFIX);
    free(up->tab);
    free(up);
    free(down->tab);
    free(down);
}
/**
* Fonction: maxPrefix
* ---------------
* @param source un pointeur vers une struct tablo representant le tableau dont il faut effectuer le max prefix
* @param result un pointeur vers une struct tablo representant le tableau final resultat du max prefix
* @param op un operateur (enum OPERATEUR)
*
* Effectue le max prefix du tablo source et ecrit le resultat dans le tablo result
*/
/* Parallel running maximum (prefix direction) of `source` into `result`,
 * using the same scan pipeline with the MAX operator. */
void maxPrefix(struct tablo * source, struct tablo * result){
    struct tablo *up = allocateTablo(source->size * 2);
    struct tablo *down = allocateTablo(source->size * 2);
    montee(source, up, MAX, PREFIX);
    initTabloNeutralElement(down, MAX);
    descente(up, down, MAX);
    final(up, down, MAX);
    writeResult(down, result, PREFIX);
    free(up->tab);
    free(up);
    free(down->tab);
    free(down);
}
/**
* Fonction: maxSuffix
* ---------------
* @param source un pointeur vers une struct tablo representant le tableau dont il faut effectuer le max suffix
* @param result un pointeur vers une struct tablo representant le tableau final resultat du max suffix
* @param op un operateur (enum OPERATEUR)
*
* Effectue le max suffix du tablo source et ecrit le resultat dans le tablo result
*/
/* Parallel running maximum (suffix direction) of `source` into `result`,
 * using the same scan pipeline with the MAX operator. */
void maxSuffix(struct tablo * source, struct tablo * result){
    struct tablo *up = allocateTablo(source->size * 2);
    struct tablo *down = allocateTablo(source->size * 2);
    montee(source, up, MAX, SUFFIX);
    initTabloNeutralElement(down, MAX);
    descente(up, down, MAX);
    final(up, down, MAX);
    writeResult(down, result, SUFFIX);
    free(up->tab);
    free(up);
    free(down->tab);
    free(down);
}
/**
* Fonction: writeResult
* ---------------
* @param source un pointeur vers une struct tablo representant un tableau resultat d'un scan/(suf)prefix
* @param result un pointeur vers une struct tablo representant le tableau final resultat de la du scan/(suf)prefix
* @param sens un sens de lecture (prefix ou suffix)
*
* Parcours le tablo source dans le sens sens et ecrit dans result le resultat final du scan/(suf)prefix
*/
/* Extracts the scan answers from the work tree `source` into `result`.
 * PREFIX: the answers sit in order in the upper half of the tree.
 * SUFFIX: the scan was run on the reversed input, so the upper half is
 * read backwards to restore the original orientation. */
void writeResult(struct tablo * source, struct tablo * result, T_SENS sens){
    const int half = source->size / 2;
    if (sens == SUFFIX){
        #pragma omp parallel for num_threads(omp_get_num_procs()/2)
        for (int i = 0; i < half; i++){
            result->tab[i] = source->tab[source->size - i - 1];
        }
    }
    else if (sens == PREFIX){
        #pragma omp parallel for num_threads(omp_get_num_procs()/2)
        for (int i = 0; i < half; i++){
            result->tab[i] = source->tab[i + half];
        }
    }
}
/**
* Fonction: findIndex
* ---------------
* @param source un pointeur vers une struct tablo representant un tableau M resultal de l'implementation parallele du maximum subarray
* Parcours le tablo source, stock dans un tablo l'index de debut et de fin de l'apparition du maximum du tablo source et le maximum
* @return un pointeur de struct tablo de taille 3 contenant : index de debut, index de fin
*/
/* Scans M (the per-position max-subarray values) and returns a tablo of
 * size 3: {start index, end index, maximum value} of the run where the
 * maximum appears.
 * NOTE(review): the end index is bumped on EVERY later occurrence equal to
 * the current maximum -- this presumably assumes the maximal values form
 * one contiguous run (true for this algorithm's M); confirm if reused. */
struct tablo * findIndex(struct tablo * source){
int * maximum = NULL; // pointer to the current maximum
int * actual = NULL; // pointer to the element being compared
struct tablo * bornes = allocateTablo(3); // {start, end, max}; calloc'd to 0
maximum = &source->tab[0]; // initial maximum at index 0
for (int i = 1; i < source->size; i++){
actual = &source->tab[i]; // current element
if ((*actual) > (*maximum)){ // strictly larger: new run starts at i
bornes->tab[0] = i; // run start
bornes->tab[1] = bornes->tab[0]; // reset run end
maximum = actual;
}
else if ((*actual) == (*maximum)){
bornes->tab[1]++; // extend the run end
}
//printf("actual = %d, maximum = %d");
}
bornes->tab[2] = *maximum; // store the maximum value itself
return bornes;
}
/**
* Fonction: maxSubSequence
* ---------------
* @param source un pointeur vers une struct tablo representant un tableau dont on veut calculer le maximum subarray
* @param indexes un pointeur vers une struct tablo representant les indexes du debut et de fin qui contient la sequence du max subarray
* Parcours le tablo source et ecrit dans un tablo la sequence du maximum subarray du tablo source
* @return un pointeur de struct tablo contenant la sequence du max subarray
*/
/* Builds the output of the program: a tablo whose slot 0 holds the
 * maximum-subarray value (indexes->tab[2]) and slots 1.. hold the elements
 * of `source` between the start and end indices found by findIndex. */
struct tablo * maxSubSequence(struct tablo * source, struct tablo * indexes){
// +2: one slot for the max value itself, +1 for inclusive bounds
int size = indexes->tab[1] - indexes->tab[0] + 2;
struct tablo * result = allocateTablo(size);
result->tab[0] = indexes->tab[2]; // the maximum goes first
#pragma omp parallel for num_threads(omp_get_num_procs()/2)
for (int i = 1; i < size; i++){
//printf("%d \n", source->tab[indexes->tab[0] + i-2]);
result->tab[i] = source->tab[indexes->tab[0] + i-1];
}
return result;
}
/**
* Fonction: maxSubSequence
* ---------------
* @param source un pointeur vers une struct tablo representant un tableau dont on veut calculer le maximum subarray
* @param M un pointeur vers une struct tablo representant ou sera ecris le resultat de l'algo parallele du maximum subarray
* implemente l'algo decrit dans :
* "Parallel Algorithms For Maximum Subsequence And Maximum Subarray (1995) by Kalyan Perumalla and Narsingh Deo"
*/
/* Parallel maximum-subarray (Perumalla & Deo 1995): computes prefix/suffix
 * sums, running maxima of those, and combines them so M->tab[i] holds the
 * value of the best subarray through position i.
 * Fix over the original: the two "#pragma omp parallel { f(); g(); }"
 * blocks made EVERY thread in the team execute both calls, redundantly
 * recomputing and racing on the shared PSUM/SSUM/PMAX/SMAX arrays.
 * "parallel sections" runs each call exactly once, on its own thread. */
void maxSubArray(struct tablo * source, struct tablo * M){
    struct tablo * PSUM = allocateTablo(source->size);
    struct tablo * SSUM = allocateTablo(source->size);
    struct tablo * SMAX = allocateTablo(source->size);
    struct tablo * PMAX = allocateTablo(source->size);
    #pragma omp parallel sections num_threads(2)
    {
        #pragma omp section
        sumPrefix(source, PSUM);
        #pragma omp section
        sumSuffix(source, SSUM);
    }
    #pragma omp parallel sections num_threads(2)
    {
        #pragma omp section
        maxPrefix(SSUM, PMAX);
        #pragma omp section
        maxSuffix(PSUM, SMAX);
    }
    /* combine: best sum ending at i + best sum starting at i - a[i] */
    #pragma omp parallel for num_threads(omp_get_num_procs()/2)
    for (int i = 0; i < M->size; i++){
        M->tab[i] = (PMAX->tab[i] - SSUM->tab[i] + source->tab[i]) +
        (SMAX->tab[i] - PSUM->tab[i] + source->tab[i]) - source->tab[i];
    }
    free(PSUM->tab);
    free(PSUM);
    free(SSUM->tab);
    free(SSUM);
    free(SMAX->tab);
    free(SMAX);
    free(PMAX->tab);
    free(PMAX);
}
/* Fills `s` with a fixed 16-element test array (used when no input file
 * is given on the command line). */
void generateArray(struct tablo * s) {
    static const int sample[16] = {
        3, 2, -7, 11, 10, -6, 4, 9, -6, 1, -2, -3, 4, -3, 0, 2
    };
    s -> size = 16;
    s -> tab = malloc(s -> size * sizeof(int));
    for (int i = 0; i < 16; i++) {
        s -> tab[i] = sample[i];
    }
}
/* Fills `s` with `size` pseudo-random ints in [0, 90000]. */
void generateRandomArray(struct tablo * s, int size) {
    s -> size = size;
    s -> tab = malloc(size * sizeof(int));
    for (int k = 0; k < size; k++) {
        s -> tab[k] = rand()%90001;
    }
}
/**
* Fonction: readFile
* ---------------
* @param s un pointeur vers une struct tablo representant un tableau ou sera ecrit les entier du fichier fichier
* @param fichier nom d'un fichier contenant un tableau d'une taille puissance de 2
* lit le fichier et ecrit dans s les entiers dans le fichier
*/
/* Reads whitespace-separated integers from `fichier` into `s`.
 * The file's byte length (from ftell) is used as an allocation upper
 * bound -- each parsed int consumes at least one byte, so the buffer is
 * large enough; s->size is corrected to the actual count afterwards.
 * Exits the program if the file cannot be opened. */
void readFile(struct tablo * s, char * fichier){
int n = 0;
int i = 0;
FILE * file = fopen(fichier, "r");
if (file == NULL){
printf("Erreur lors de l'ouverture du fichier");
exit(-1);
}
// byte length = over-estimate of the number of integers
fseek(file, 0L, SEEK_END);
s->size = ftell(file);
rewind(file);
s->tab = malloc(s->size * sizeof(int));
while(fscanf(file, "%d", &n) == 1){
s->tab[i] = n;
i++;
}
s->size = i; // shrink to the number of ints actually read
fclose(file);
}
/* Entry point: loads the input array from argv[1] if given (otherwise uses
 * the built-in 16-element sample), runs the parallel maximum-subarray
 * algorithm, and prints the maximum followed by the winning subsequence. */
int main(int argc, char ** argv) {
srand(time(NULL));
struct tablo source;
if (argc > 1) {
//tailleArray = atoi(argv[1]);
//generateRandomArray( & source, tailleArray);
readFile(&source, argv[1]);
}
else{
generateArray(&source);
}
struct tablo * M = allocateTablo(source.size);
struct tablo * resultIndex = NULL;
struct tablo * result = NULL;
maxSubArray(&source, M); // M[i] = best subarray value through i
resultIndex = findIndex(M); // {start, end, max}
//printArray(M);
result = maxSubSequence(&source, resultIndex);
printResult(result);
// free everything (keeps valgrind happy)
free(source.tab);
free(M->tab);
free(M);
free(resultIndex->tab);
free(resultIndex);
free(result->tab);
free(result);
return 0;
}
|
decomp.h | /*!
* Software SPAMS v2.2 - Copyright 2009-2011 Julien Mairal
*
* This file is part of SPAMS.
*
* SPAMS is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SPAMS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SPAMS. If not, see <http://www.gnu.org/licenses/>.
*
*
* \file
* toolbox decomp
*
* by Julien Mairal
* julien.mairal@inria.fr
*
* File decomp.h
* \brief Contains sparse decomposition algorithms
* It requires the toolbox linalg */
#ifndef DECOMP_H
#define DECOMP_H
#include "utils.h"
static char low='l';
static char nonUnit='n';
/* **************************
* Greedy Forward Selection
* **************************/
/// Forward Selection (or Orthogonal Matching Pursuit)
/// Addresses the problem of:
/// \forall i, \min_{\alpha_i} ||X_i - D\alpha_i||_2^2
/// s.t. ||\alpha_i||_0 <= L, or
/// \forall i, \min_{\alpha_i} ||\alpha_i||_0
/// s.t. ||X_i - D\alpha_i||_2^2 <= epsilon
/// This function is
/// * based on Cholesky decompositions
/// * parallel
/// * optimized for a large number of signals (precomputes the Gram matrix)
template <typename T>
void omp(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
const int *L, const T* eps, const T* lambda, const bool vecL = false,
const bool vecEps = false, const bool Lambda=false, const int numThreads=-1,
Matrix<T>* path = NULL);
template <typename T>
void omp_mask(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha, const Matrix<bool>& mask,
const int *L, const T* eps, const T* lambda, const bool vecL = false,
const bool vecEps = false, const bool Lambda=false, const int numThreads=-1,
Matrix<T>* path = NULL);
/// Auxiliary function of omp
template <typename T>
void coreORMP(Vector<T>& scores, Vector<T>& norm, Vector<T>& tmp,
Matrix<T>& Un, Matrix<T>& Undn, Matrix<T>& Unds, Matrix<T>& Gs,
Vector<T>& Rdn, const AbstractMatrix<T>& G, Vector<INTM>& ind,
Vector<T>& RUn, T& normX, const T* eps, const int* L, const T* lambda,
T* path = NULL);
/// Auxiliary function of omp
template <typename T>
void coreORMPB(Vector<T>& RtD, const AbstractMatrix<T>& G, Vector<INTM>& ind,
Vector<T>& coeffs, T& normX, const int L, const T eps, const T lambda = 0);
/// Auxiliary function of omp
/*template <typename T>
void coreORMPWeighted(Vector<T>& scores, Vector<T>& weights, Vector<T>& norm,
Vector<T>& tmp, Matrix<T>& Un, Matrix<T>& Undn, Matrix<T>& Unds,
Matrix<T>& Gs, Vector<T>& Rdn, const AbstractMatrix<T>& G, Vector<INTM>&
ind, Vector<T>& RUn, T& normX, const T eps, const int L, const T lambda);*/
/* **************
* LARS - Lasso
* **************/
/// Defines different types of problem,
/// - constraint on the l1 norm of the coefficients
/// - constraint on the reconstruction error
/// - l1-sparsity penalty
enum constraint_type { L1COEFFS, L2ERROR, PENALTY, SPARSITY, L2ERROR2, PENALTY2,FISTAMODE};
/// Implementation of LARS-Lasso for solving
/// \forall i, \min_{\alpha_i} ||X_i-D\alpha_i||_2^2
/// s.t. ||\alpha_i||_1 <= constraint or
/// \forall i, \min_{\alpha_i} ||\alpha_i||_1
/// s.t. ||X_i-D\alpha_i||_2^2 <= constraint or
/// \forall i, \min_{\alpha_i} constraint*||\alpha_i||_1 +
/// ||X_i-D\alpha_i||_2^2 (penalty formulation)
/// Optionally, the solution might be positive (boolean pos), and a
/// Least-Square can be solved as a post-processing step.
/// L is a maximum number of coefficients.
/// This function is
/// * efficient (Cholesky-based)
/// * parallel
/// * optimized for a large number of signals (precomputes the Gram matrix)
template <typename T>
void lasso(const Matrix<T>& X, const Matrix<T>& D,
SpMatrix<T>& spalpha,
int L, const T constraint, const T lambda2 = 0, constraint_type mode = PENALTY,
const bool pos = false, const bool ols = false, const int numThreads=-1,
Matrix<T>* path = NULL, const int length_path=-1);
template <typename T>
void lasso(const Data<T>& X, const AbstractMatrix<T>& G, const AbstractMatrix<T>& DtX,
SpMatrix<T>& spalpha,
int L, const T constraint, constraint_type mode = PENALTY,
const bool pos = false, const bool ols = false, const int numThreads=-1,
Matrix<T>* path = NULL, const int length_path=-1);
/// second implementation using matrix inversion lemma
template <typename T>
void lasso2(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
int L, const T constraint,const T lambda2=0, constraint_type mode = PENALTY, const bool pos = false,
const int numThreads = -1, Matrix<T>* path = NULL, const int length_path=-1);
template <typename T>
void lasso2(const Data<T>& X, const AbstractMatrix<T>& G, const AbstractMatrix<T>& DtX,
SpMatrix<T>& spalpha,
int L, const T constraint, constraint_type mode = PENALTY, const bool pos = false,
const int numThreads = -1, Matrix<T>* path = NULL, const int length_path=-1);
/// second implementation using matrix inversion lemma
template <typename T>
void lasso_mask(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha, const Matrix<bool>& mask,
int L, const T constraint,const T lambda2=0, constraint_type mode = PENALTY, const bool pos = false,
const int numThreads = -1);
/// second implementation using matrix inversion lemma
template <typename T>
void lassoReweighted(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
int L, const T constraint, constraint_type mode, const bool pos,
const T sigma,
const int numThreads = -1);
/// Auxiliary function for lasso
template <typename T>
void coreLARS(Vector<T>& Rdn, Vector<T>& Xdn, Vector<T>& A,
Vector<T>& u, Vector<T>& sig,
Vector<T>& av, Vector<T>& RUn, Matrix<T>& Un,
Matrix<T>& Unds, Matrix<T>& Gs,
Matrix<T>& Gsa, Matrix<T>& workT, Matrix<T>& R,
const AbstractMatrix<T>& G,T& normX,
Vector<int>& ind,Vector<T>& coeffs,const T constraint,
const bool ols = false,
const bool pos =false,
constraint_type mode = L1COEFFS,
T* path = NULL, int length_path=-1);
template <typename T>
void coreLARS2(Vector<T>& DtR, const AbstractMatrix<T>& G,
Matrix<T>& Gs,
Matrix<T>& Ga,
Matrix<T>& invGs,
Vector<T>& u,
Vector<T>& coeffs,
Vector<INTM>& ind,
Matrix<T>& work,
T& normX,
const constraint_type mode,
const T constraint, const bool pos = false,
T* pr_path = NULL, int length_path = -1);
template <typename T>
void coreLARS2(Vector<T>& DtR, const AbstractMatrix<T>& G,
Vector<T>& coeffs, T normX,
const constraint_type mode,
const T constraint, const bool pos = false);
template <typename T>
void coreLARS2W(Vector<T>& DtR, const AbstractMatrix<T>& G,
Matrix<T>& Gs,
Matrix<T>& Ga,
Matrix<T>& invGs,
Vector<T>& u,
Vector<T>& coeffs,
const Vector<T>& weights,
Vector<INTM>& ind,
Matrix<T>& work,
T& normX,
const constraint_type mode,
const T constraint, const bool pos = false);
template <typename T>
void coreLARS2W(Vector<T>& DtR, const AbstractMatrix<T>& G,
Vector<T>& coeffs, const Vector<T>& weights, T normX,
const constraint_type mode,
const T constraint, const bool pos = false);
/// Auxiliary function for coreLARS (Cholesky downdate)
template <typename T>
void downDateLasso(int& j,int& minBasis,T& normX,const bool ols,
const bool pos, Vector<T>& Rdn, INTM* ind,
T* coeffs, Vector<T>& sig, Vector<T>& av,
Vector<T>& Xdn, Vector<T>& RUn,Matrix<T>& Unm, Matrix<T>& Gsm,
Matrix<T>& Gsam, Matrix<T>& Undsm, Matrix<T>& Rm);
/* ************************
* Iterative thresholding
* ************************/
/// Implementation of IST for solving
/// \forall i, \min_{\alpha_i} ||\alpha_i||_1
/// s.t. ||\X_i-D\alpha_i||_2^2 <= constraint or
/// \forall i, \min_{\alpha_i} constraint*||\alpha_i||_1 + ...
/// ... ||\X_i-D\alpha_i||_2^2 <= T
template <typename T>
void ist(const Matrix<T>& X, const Matrix<T>& D,
SpMatrix<T>& spalpha, T lambda, constraint_type mode,
const int itermax=500,
const T tol = 0.5, const int numThreads = -1);
template <typename T>
void ist(const Matrix<T>& X, const Matrix<T>& D,
Matrix<T>& spalpha, T lambda, constraint_type mode,
const int itermax=500,
const T tol = 0.5, const int numThreads=-1);
/// coreIST
template <typename T>
void coreIST(const AbstractMatrix<T>& G, Vector<T>& DtR, Vector<T>& coeffs,
const T thrs, const int itermax = 500,
const T tol = 0.5);
template <typename T>
void coreISTW(const AbstractMatrix<T>& G, Vector<T>& DtR, Vector<T>& coeffs, const Vector<T>& weights,
const T thrs, const int itermax = 500,
const T tol = 0.5);
/// coreIST constrained
template <typename T>
void coreISTconstrained(const AbstractMatrix<T>& G, Vector<T>& DtR, Vector<T>& coeffs,
const T normX2,
const T thrs, const int itermax = 500,
const T tol = 0.5);
/// ist for group Lasso
template <typename T>
void ist_groupLasso(const Matrix<T>* XT, const Matrix<T>& D,
Matrix<T>* alphaT, const int Ngroups,
const T lambda, const constraint_type mode,
const int itermax = 500,
const T tol = 0.5, const int numThreads = -1);
/// Auxiliary function for ist_groupLasso
template <typename T>
void coreGroupIST(const Matrix<T>& G, Matrix<T>& RtD,
Matrix<T>& alphat,
const T thrs,
const int itermax=500,
const T tol = 0.5);
/// Auxiliary function for ist_groupLasso
template <typename T>
void coreGroupISTConstrained(const Matrix<T>& G, Matrix<T>& RtD,
Matrix<T>& alphat, const T normR,
const T eps,
const int itermax=500,
const T tol = 0.5);
/// auxiliary function for ist_groupLasso
template <typename T>
T computeError(const T normX2,const Vector<T>& norms,
const Matrix<T>& G,const Matrix<T>& RtD,const Matrix<T>& alphat);
/// auxiliary function for ist_groupLasso
template <typename T>
T computeError(const T normX2,
const Matrix<T>& G,const Vector<T>& DtR,const Vector<T>& coeffs,
SpVector<T>& coeffs_tmp);
/* ******************
* Simultaneous OMP
* *****************/
template <typename T>
void somp(const Matrix<T>* X, const Matrix<T>& D, SpMatrix<T>* spalpha,
const int Ngroups, const int L, const T* pr_eps, const bool adapt=false,
const int numThreads=-1);
template <typename T>
void somp(const Matrix<T>* X, const Matrix<T>& D, SpMatrix<T>* spalpha,
const int Ngroups, const int L, const T eps, const int numThreads=-1);
template <typename T>
void coreSOMP(const Matrix<T>& X, const Matrix<T>& D, const Matrix<T>& G,
Matrix<T>& vM,
Vector<INTM>& rv, const int L, const T eps);
/* *********************
* Implementation of OMP
* *********************/
/// Forward Selection (or Orthogonal Matching Pursuit)
/// Addresses the problem of:
/// \forall i, \min_{\alpha_i} ||X_i-D\alpha_i||_2^2
/// s.t. ||\alpha_i||_0 <= L or
/// \forall i, \min_{\alpha_i} ||\alpha_i||_0
/// s.t. ||X_i-D\alpha_i||_2^2 <= epsilon
/// This function is
/// * efficient (Cholesky-based)
/// * parallel
/// * optimized for a large number of signals (precomputes the Gram matrix)
template <typename T>
void omp(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
const int* pL, const T* peps, const T* pLambda,
const bool vecL, const bool vecEps,
const bool vecLambda, const int numThreads, Matrix<T>* path) {
int L;
if (!vecL) {
L=*pL;
} else {
Vector<int> vL(const_cast<int*>(pL),X.n());
L=vL.maxval();
}
spalpha.clear();
if (L <= 0) return;
const INTM M = X.n();
const INTM K = D.n();
L = MIN(X.m(),MIN(L,K));
Matrix<T> vM(L,M);
Matrix<INTM> rM(L,M);
ProdMatrix<T> G(D, K < 25000 && M > 10);
int NUM_THREADS=init_omp(numThreads);
Vector<T>* scoresT=new Vector<T>[NUM_THREADS];
Vector<T>* normT=new Vector<T>[NUM_THREADS];
Vector<T>* tmpT=new Vector<T>[NUM_THREADS];
Vector<T>* RdnT=new Vector<T>[NUM_THREADS];
Matrix<T>* UnT=new Matrix<T>[NUM_THREADS];
Matrix<T>* UndnT=new Matrix<T>[NUM_THREADS];
Matrix<T>* UndsT=new Matrix<T>[NUM_THREADS];
Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
for (int i = 0; i<NUM_THREADS; ++i) {
scoresT[i].resize(K);
normT[i].resize(K);
tmpT[i].resize(K);
RdnT[i].resize(K);
UnT[i].resize(L,L);
UnT[i].setZeros();
UndnT[i].resize(K,L);
UndsT[i].resize(L,L);
GsT[i].resize(K,L);
}
int i;
#pragma omp parallel for private(i)
for (i = 0; i< M; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
Vector<T> Xi;
X.refCol(i,Xi);
T normX = Xi.nrm2sq();
Vector<INTM> ind;
rM.refCol(i,ind);
ind.set(-1);
Vector<T> RUn;
vM.refCol(i,RUn);
Vector<T>& Rdn=RdnT[numT];
D.multTrans(Xi,Rdn);
coreORMP(scoresT[numT],normT[numT],tmpT[numT],UnT[numT],UndnT[numT],UndsT[numT],
GsT[numT],Rdn,G,ind,RUn, normX, vecEps ? peps+i : peps,
vecL ? pL+i : pL, vecLambda ? pLambda+i : pLambda,
path && i==0 ? path->rawX() : NULL);
}
delete[](scoresT);
delete[](normT);
delete[](tmpT);
delete[](RdnT);
delete[](UnT);
delete[](UndnT);
delete[](UndsT);
delete[](GsT);
/// convert the sparse matrix into a proper format
spalpha.convert(vM,rM,K);
};
/// OMP variant where each signal X_i carries a boolean mask: masked-out
/// rows of X_i and D are excluded from the decomposition of that signal.
/// Same parameter conventions as omp() above; an all-false mask yields an
/// empty (all -1) column in the result.
template <typename T>
void omp_mask(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha, const Matrix<bool>& mask,
      const int *pL, const T* peps, const T* pLambda, const bool vecL,
      const bool vecEps, const bool vecLambda, const int numThreads,
      Matrix<T>* path) {
   int L;
   if (!vecL) {
      L=*pL;
   } else {
      // per-signal sparsity levels: size all workspaces for the largest one
      Vector<int> vL(const_cast<int*>(pL),X.n());
      L=vL.maxval();
   }
   spalpha.clear();
   if (L <= 0) return;
   const int M = X.n();
   const int K = D.n();
   L = MIN(X.m(),MIN(L,K));
   Matrix<T> vM(L,M);
   Matrix<INTM> rM(L,M);
   ProdMatrix<T> G(D, K < 25000 && M > 10);
   int NUM_THREADS=init_omp(numThreads);
   // per-thread scratch buffers (same layout as in omp())
   Vector<T>* scoresT=new Vector<T>[NUM_THREADS];
   Vector<T>* normT=new Vector<T>[NUM_THREADS];
   Vector<T>* tmpT=new Vector<T>[NUM_THREADS];
   Vector<T>* RdnT=new Vector<T>[NUM_THREADS];
   Matrix<T>* UnT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* UndnT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* UndsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   // mask-specific scratch: a masked copy of D and X_i, plus a per-thread
   // Gram matrix recomputed for each partially-masked signal
   ProdMatrix<T>* GT=new ProdMatrix<T>[NUM_THREADS];
   Matrix<T>* DmaskT=new Matrix<T>[NUM_THREADS];
   Vector<T>* XmaskT=new Vector<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DmaskT[i].resize(D.m(),D.n());
      XmaskT[i].resize(X.m());
      scoresT[i].resize(K);
      normT[i].resize(K);
      tmpT[i].resize(K);
      RdnT[i].resize(K);
      UnT[i].resize(L,L);
      UnT[i].setZeros();
      UndnT[i].resize(K,L);
      UndsT[i].resize(L,L);
      GsT[i].resize(K,L);
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      X.refCol(i,Xi);
      Vector<INTM> ind;
      rM.refCol(i,ind);
      ind.set(-1);
      Vector<T> RUn;
      vM.refCol(i,RUn);
      Vector<bool> maski;
      mask.refCol(i,maski);
      Vector<T>& Rdn=RdnT[numT];
      // nothing observable for this signal: leave its column empty
      if (maski.allfalse()) continue;
      if (maski.alltrue()) {
         // fully observed: identical to the plain omp() path
         D.multTrans(Xi,Rdn);
         T normX = Xi.nrm2sq();
         coreORMP(scoresT[numT],normT[numT],tmpT[numT],UnT[numT],UndnT[numT],UndsT[numT],
               GsT[numT],Rdn,G,ind,RUn, normX, vecEps ? peps+i : peps,
               vecL ? pL+i : pL, vecLambda ? pLambda+i : pLambda,
               path && i==0 ? path->rawX() : NULL);
      } else {
         // restrict D and X_i to the observed rows
         D.copyMask(DmaskT[numT],maski);
         Xi.copyMask(XmaskT[numT],maski);
         T normX = XmaskT[numT].nrm2sq();
         DmaskT[numT].multTrans(XmaskT[numT],Rdn);
         GT[numT].setMatrices(DmaskT[numT],false);
         // small diagonal regularization for numerical stability
         GT[numT].addDiag(T(1e-10));
         // rescale the error target by the fraction of observed entries
         T eps_mask= (vecEps ? *(peps+i) : *peps)*XmaskT[numT].n()/Xi.n();
         coreORMP(scoresT[numT],normT[numT],tmpT[numT],
               UnT[numT],UndnT[numT],UndsT[numT],
               GsT[numT],Rdn,GT[numT],ind,RUn,
               normX, &eps_mask, vecL ? pL+i : pL,
               vecLambda ? pLambda+i : pLambda,
               path && i==0 ? path->rawX() : NULL);
         // restore the full logical dimensions of the masked buffers so the
         // next iteration's copyMask sees the original sizes
         DmaskT[numT].setm(D.m());
         DmaskT[numT].setn(D.n());
         XmaskT[numT].setn(X.m());
      }
   }
   delete[](GT);
   delete[](XmaskT);
   delete[](DmaskT);
   delete[](scoresT);
   delete[](normT);
   delete[](tmpT);
   delete[](RdnT);
   delete[](UnT);
   delete[](UndnT);
   delete[](UndsT);
   delete[](GsT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
/// Convenience wrapper around coreORMP for a single signal: allocates the
/// required scratch buffers locally, then runs one OMP decomposition given
/// the initial correlations RtD = D'x, the Gram matrix G, the residual
/// energy normX (updated in place), and the stopping parameters L/eps/lambda.
template <typename T>
void coreORMPB(Vector<T>& RtD, const AbstractMatrix<T>& G, Vector<INTM>& ind,
      Vector<T>& coeffs, T& normX, const int L, const T eps, const T lambda) {
   const int nAtoms = G.n();
   // scratch vectors over the dictionary atoms
   Vector<T> workScores(nAtoms);
   Vector<T> workNorm(nAtoms);
   Vector<T> workTmp(nAtoms);
   // Cholesky-related scratch matrices sized by the sparsity budget L
   Matrix<T> workUn(L,L);
   Matrix<T> workUndn(nAtoms,L);
   Matrix<T> workUnds(L,L);
   Matrix<T> workGs(nAtoms,L);
   // every index slot starts out unused
   ind.set(-1);
   coreORMP(workScores,workNorm,workTmp,workUn,workUndn,workUnds,workGs,
         RtD,G,ind,coeffs,normX,&eps,&L,&lambda);
}
/// Auxiliary function of omp: core Order-Recursive Matching Pursuit loop for
/// one signal, based on an incremental Cholesky-style factorization held in
/// Un/Undn/Unds/Gs. Rdn holds the current correlations D'r, normX the
/// residual energy (updated in place), ind receives the selected atom
/// indices and RUn the corresponding coefficients (in the rotated basis
/// until the final triangular solve). Stops when L atoms are selected, the
/// residual drops below eps, or the lambda-based gain test fails.
template <typename T>
void coreORMP(Vector<T>& scores, Vector<T>& norm, Vector<T>& tmp, Matrix<T>& Un,
      Matrix<T>& Undn, Matrix<T>& Unds, Matrix<T>& Gs, Vector<T>& Rdn,
      const AbstractMatrix<T>& G,
      Vector<INTM>& ind, Vector<T>& RUn,
      T& normX, const T* peps, const int* pL, const T* plambda,
      T* path) {
   const T eps = abs<T>(*peps);
   const int L = MIN(*pL,Gs.n());
   const T lambda=*plambda;
   // already accurate enough, or no atoms requested: nothing to do
   if ((normX <= eps) || L == 0) return;
   const int K = scores.n();
   scores.copy(Rdn);
   norm.set(T(1.0));
   Un.setZeros();
   // permit unsafe low level access
   T* const prUn = Un.rawX();
   //T* const prUnds = Unds.rawX();
   T* const prUndn = Undn.rawX();
   T* const prGs = Gs.rawX();
   T* const prRUn= RUn.rawX();
   if (path)
      memset(path,0,K*L*sizeof(T));
   int j;
   for (j = 0; j<L; ++j) {
      // greedy selection: atom with the largest normalized score
      const int currentInd=scores.fmax();
      // numerically degenerate atom: stop and mark the slot unused
      if (norm[currentInd] < 1e-8) {
         ind[j]=-1;
         break;
      }
      const T invNorm=T(1.0)/sqrt(norm[currentInd]);
      const T RU=Rdn[currentInd]*invNorm;
      const T delta = RU*RU;
      // lambda-penalized mode: stop when the energy gain no longer pays
      if (delta < 2*lambda) {
         break;
      }
      RUn[j]=RU;
      normX -= delta;
      ind[j]=currentInd;
      //for (int k = 0; k<j; ++k) prUn[j*L+k]=0.0;
      //prUn[j*L+j]=T(1.0);
      // for (int k = 0; k<j; ++k) prUnds[k*L+j]=prUndn[k*K+currentInd];
      // MGS algorithm, Update Un
      // int iter = norm[currentInd] < 0.5 ? 2 : 1;
      //int iter=1;
      // for (int k = 0; k<iter; ++k) {
      /// for (int l = 0; l<j; ++l) {
      // T scal=-cblas_dot<T>(j+1-l,prUn+j*L+l,1,prUnds+l*L+l,1);
      // T scal = -prUnds[l*L+j];
      // cblas_axpy<T>(l+1,scal,prUn+l*L,1,prUn+j*L,1);
      // }
      // }
      // extend the (upper-triangular) basis change Un with the new column
      prUn[j*L+j]=-T(1.0);
      cblas_copy<T>(j,prUndn+currentInd,K,prUn+j*L,1);
      cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,j,prUn,L,prUn+j*L,1);
      cblas_scal<T>(j+1,-invNorm,prUn+j*L,1);
      if (j == L-1 || (normX <= eps)) {
         ++j;
         break;
      }
      if (path) {
         // record the current coefficients (back-substituted to the original
         // basis) into column j of the path matrix, using the last column as
         // a scratch buffer
         T* last_path=path+(L-1)*K;
         cblas_copy<T>(j+1,prRUn,1,last_path,1);
         cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
               j+1,prUn,L,last_path,1);
         for (int k = 0; k<=j; ++k) {
            path[j*K+ind[k]]=last_path[k];
         }
      }
      // update the variables Gs, Undn, Unds, Rdn, norm, scores
      Vector<T> Gsj;
      Gs.refCol(j,Gsj);
      G.copyCol(currentInd,Gsj);
      cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),prGs,K,prUn+j*L,1,
            T(0.0),prUndn+j*K,1);
      // prUnds[j*L+j] = prUndn[j*K+currentInd];
      Vector<T> Undnj;
      Undn.refCol(j,Undnj);
      Rdn.add(Undnj,-RUn[j]);
      tmp.sqr(Undnj);
      norm.sub(tmp);
      scores.sqr(Rdn);
      scores.div(norm);
      // exclude already-selected atoms from future selection
      for (int k = 0; k<=j; ++k) scores[ind[k]]=T();
   }
   // compute the final coefficients
   cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
         j,prUn,L,prRUn,1);
   if (path) {
      // NOTE(review): only L entries of the K-sized scratch column are
      // cleared here — presumably relies on L <= K and on only the first L
      // entries having been written; confirm before changing.
      memset(path+(L-1)*K,0,L*sizeof(T));
      for (int k = 0; k<j; ++k) {
         path[(j-1)*K+ind[k]]=prRUn[k];
      }
   }
};
/// Auxiliary function of omp
/*template <typename T>
void coreORMPWeighted(Vector<T>& scores, Vector<T>& weights, Vector<T>& norm, Vector<T>& tmp, Matrix<T>& Un,
Matrix<T>& Undn, Matrix<T>& Unds, Matrix<T>& Gs, Vector<T>& Rdn,
const AbstractMatrix<T>& G,
Vector<INTM>& ind, Vector<T>& RUn,
T& normX, const T peps, const int pL, const T plambda) {
const T eps = abs<T>(*peps);
const int L = MIN(*pL,Gs.n());
const T lambda=*plambda;
if ((normX <= eps) || L == 0) return;
const int K = scores.n();
scores.copy(Rdn);
scores.div(weights);
norm.set(T(1.0));
Un.setZeros();
// permit unsafe low level access
T* const prUn = Un.rawX();
T* const prUnds = Unds.rawX();
T* const prUndn = Undn.rawX();
T* const prGs = Gs.rawX();
T* const prRUn= RUn.rawX();
int j;
for (j = 0; j<L; ++j) {
const int currentInd=scores.fmax();
if (norm[currentInd] < 1e-8) {
ind[j]=-1;
break;
}
const T invNorm=T(1.0)/sqrt(norm[currentInd]);
const T RU=Rdn[currentInd]*invNorm;
const T delta = RU*RU;
if (delta < 2*lambda) {
break;
}
RUn[j]=RU;
normX -= delta;
ind[j]=currentInd;
prUn[j*L+j]=-T(1.0);
cblas_copy<T>(j,prUndn+currentInd,K,prUn+j*L,1);
cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,j,prUn,L,prUn+j*L,1);
cblas_scal<T>(j+1,-invNorm,prUn+j*L,1);
if (j == L-1 || (normX <= eps)) {
++j;
break;
}
// update the variables Gs, Undn, Unds, Rdn, norm, scores
Vector<T> Gsj;
Gs.refCol(j,Gsj);
G.copyCol(currentInd,Gsj);
cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),prGs,K,prUn+j*L,1,
T(0.0),prUndn+j*K,1);
Vector<T> Undnj;
Undn.refCol(j,Undnj);
Rdn.add(Undnj,-RUn[j]);
tmp.sqr(Undnj);
norm.sub(tmp);
scores.sqr(Rdn);
scores.div(norm);
scores.div(weights);
for (int k = 0; k<=j; ++k) scores[ind[k]]=T();
}
// compute the final coefficients
cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
j,prUn,L,prRUn,1);
};*/
/* **************
* LARS - Lasso
* **************/
/// Implementation of LARS-Lasso for solving
/// \forall i, \min_{\alpha_i} ||X_i-D\alpha_i||_2^2
/// s.t. ||\alpha_i||_1 <= constraint or
/// \forall i, \min_{\alpha_i} ||\alpha_i||_1
/// s.t. ||X_i-D\alpha_i||_2^2 <= constraint or
/// \forall i, \min_{\alpha_i} constraint*||\alpha_i||_1 +
/// ||X_i-D\alpha_i||_2^2 (penalty formulation)
/// Optionally, the solution might be positive (boolean pos), and a
/// Least-Square can be solved as a post-processing step.
/// L is a maximum number of coefficients.
/// This function is
/// * efficient (Cholesky-based)
/// * parallel
/// * optimized for a large number of signals (precomputes the Gram matrix)
/// Matrix front-end for LARS-Lasso: builds the Gram matrix G = D'D (with a
/// regularized diagonal) and the correlations D'X, then delegates to the
/// generic lasso() implementation operating on abstract matrices.
template <typename T>
void lasso(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
      int L, const T lambda, const T lambda2, constraint_type mode,
      const bool pos, const bool ols, const int numThreads,
      Matrix<T>* path, const int length_path) {
   // Precompute D'D only with enough signals and a small enough dictionary.
   const bool useGramCache = X.n() > 10 && D.n() < 50000;
   ProdMatrix<T> G(D, useGramCache);
   // lambda2 adds an elastic-net style diagonal term; the 1e-10 floor keeps
   // the Gram matrix numerically well-conditioned even when lambda2 == 0.
   G.addDiag(MAX(lambda2,1e-10));
   ProdMatrix<T> DtX(D,X,false);
   lasso(X,G,DtX,spalpha,L,lambda,mode,pos,ols,numThreads,path,length_path);
}
/// Generic LARS-Lasso driver: codes every column of X given precomputed
/// abstract matrices G = D'D and DtX = D'X. Allocates per-thread scratch
/// buffers, runs coreLARS on each signal in parallel, and packs the results
/// into spalpha. When path is non-NULL, the regularization path of the
/// FIRST signal only is recorded.
template <typename T>
void lasso(const Data<T>& X, const AbstractMatrix<T>& G,
      const AbstractMatrix<T>& DtX, SpMatrix<T>& spalpha,
      int L, const T lambda, constraint_type mode,
      const bool pos, const bool ols, const int numThreads,
      Matrix<T>* path, const int length_path) {
   spalpha.clear();
   const INTM M = X.n();
   const INTM K = G.n();
   Matrix<T> vM;
   Matrix<INTM> rM;
   vM.resize(L,M);
   rM.resize(L,M);
   if (L <= 0) return;
   if (path) path->setZeros();
   int NUM_THREADS=init_omp(numThreads);
   //ProdMatrix<T> G(D, K < 25000 && M > 10);
   // per-thread scratch buffers for coreLARS
   Vector<T>* RdnT=new Vector<T>[NUM_THREADS];
   Vector<T>* XdnT =new Vector<T>[NUM_THREADS];
   Vector<T>* AT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Vector<T>* sigT=new Vector<T>[NUM_THREADS];
   Vector<T>* avT=new Vector<T>[NUM_THREADS];
   Vector<T>* RUnT = new Vector<T>[NUM_THREADS];
   Matrix<T>* UnT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* RT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* UndsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GsaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      RdnT[i].resize(K);
      // Xdn and RUn are only needed for the optional OLS post-processing
      if (ols) XdnT[i].resize(K);
      AT[i].resize(K);
      uT[i].resize(L);
      sigT[i].resize(L);
      avT[i].resize(L);
      if (ols) RUnT[i].resize(L);
      UnT[i].resize(L,L);
      UnT[i].setZeros();
      UndsT[i].resize(L,L);
      UndsT[i].setZeros();
      GsT[i].resize(K,L);
      GsaT[i].resize(L,L);
      workT[i].resize(K,2);
      RT[i].resize(L,L);
   }
   // squared norms of the signals (initial residual energies)
   Vector<T> norms;
   X.norm_2sq_cols(norms);
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      T normX = norms[i];
      Vector<INTM> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      coeffs.setZeros();
      Vector<T>& Rdn=RdnT[numT];
      // initial correlations D'X_i
      DtX.copyCol(i,Rdn);
      coreLARS(Rdn,XdnT[numT], AT[numT], uT[numT], sigT[numT], avT[numT],
            RUnT[numT], UnT[numT], UndsT[numT], GsT[numT], GsaT[numT],
            workT[numT],RT[numT],G,normX, ind,coeffs,lambda,ols,pos,
            mode,path && i==0 ? path->rawX() : NULL, length_path);
   }
   delete[](RdnT);
   delete[](XdnT);
   delete[](AT);
   delete[](uT);
   delete[](sigT);
   delete[](avT);
   delete[](RUnT);
   delete[](UnT);
   delete[](RT);
   delete[](UndsT);
   delete[](GsT);
   delete[](GsaT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
/// Auxiliary function for lasso: LARS homotopy loop for one signal.
/// Maintains an incremental triangular factorization (Un/Unds) of the Gram
/// matrix restricted to the active set. Rdnv holds the current correlations
/// D'r, normX the residual energy (updated in place); indv receives the
/// active atom indices and coeffsv the coefficients. `constraint` is
/// interpreted according to `mode` (l1 bound, error bound, or penalty).
/// When `ols` is set, a least-squares refit on the active set is performed
/// at the end; `pos` restricts the solution to non-negative coefficients.
template <typename T>
void coreLARS(Vector<T>& Rdnv, Vector<T>& Xdnv, Vector<T>& Av,
      Vector<T>& uv, Vector<T>& sigv, Vector<T>& avv, Vector<T>& RUnv,
      Matrix<T>& Unm, Matrix<T>& Undsm, Matrix<T>& Gsm,
      Matrix<T>& Gsam, Matrix<T>& workm, Matrix<T>& Rm,
      const AbstractMatrix<T>& Gm,T& normX,
      Vector<INTM>& indv,Vector<T>& coeffsv,const T constraint,
      const bool ols,const bool pos, constraint_type mode,
      T* path, int length_path) {
   // error-bounded mode: the zero solution may already satisfy the bound
   if (mode == L2ERROR && normX < constraint) return;
   const int LL = Gsm.n();
   const int K = Gsm.m();
   const int L = MIN(LL,K);
   if (length_path <= 1) length_path=4*L;
   // permit unsafe fast low level access
   T* const Rdn = Rdnv.rawX();
   T* const Xdn = Xdnv.rawX();
   T* const A = Av.rawX();
   T* const u = uv.rawX();
   T* const sig = sigv.rawX();
   //T* const av = avv.rawX();
   T* const RUn = RUnv.rawX();
   T* const Un = Unm.rawX();
   T* const Unds = Undsm.rawX();
   T* const Gs = Gsm.rawX();
   T* const Gsa = Gsam.rawX();
   T* const work = workm.rawX();
   //T* const G = Gm.rawX();
   //T* const R = Rm.rawX();
   INTM* ind = indv.rawX();
   T* coeffs = coeffsv.rawX();
   coeffsv.setZeros();
   indv.set(-1);
   if (ols) Xdnv.copy(Rdnv);
   // first atom: largest (or largest-magnitude) correlation
   int currentInd= pos ? Rdnv.max() : Rdnv.fmax();
   bool newAtom=true;
   T Cmax = 0;
   int iter=1;
   T thrs = 0.0;
//   INTM* const ind_orig = ind;
//   T* const coeffs_orig = coeffs;
   int j;
   for (j = 0; j<L; ++j) {
      if (newAtom) {
         // add atom currentInd to the active set and extend Un/Unds/Gs/Gsa
         ind[j]=currentInd;
         if (pos) {
            Cmax = Rdn[currentInd];
            sig[j]=1.0;
         } else {
            Cmax = abs<T>(Rdn[currentInd]);
            sig[j] = SIGN(Rdn[currentInd]);
         }
         for (int k = 0; k<=j; ++k) Un[j*L+k]=0.0;
         Un[j*L+j]=1.0;
         Gm.extract_rawCol(currentInd,Gs+K*j);
         // fold the signs of the active atoms into the stored Gram columns
         for (int k = 0; k<j; ++k) Gs[K*j+ind[k]] *= sig[k];
         if (sig[j] < 0) {
            Rdn[currentInd]=-Rdn[currentInd];
            if (ols) Xdn[currentInd]=-Xdn[currentInd];
            cblas_scal<T>(K,sig[j],Gs+K*j,1);
            cblas_scal<T>(j+1,sig[j],Gs+currentInd,K);
         }
         cblas_copy<T>(j+1,Gs+currentInd,K,Gsa+j*L,1);
         for (int k = 0; k<j; ++k) Gsa[k*L+j]=Gsa[j*L+k];
         // <d_j,d_i>
         cblas_copy<T>(j,Gsa+j*L,1,Unds+j,L);
         // <U_j final,d_i>
         cblas_trmv<T>(CblasColMajor,CblasUpper,CblasTrans,CblasNonUnit,
               j+1,Un,L,Unds+j,L);
         // norm2
         T norm2=Gsa[j*L+j];
         for (int k = 0; k<j; ++k) norm2 -= Unds[k*L+j]*Unds[k*L+j];
         // the new atom is (numerically) in the span of the active set
         if (norm2 < 1e-15) {
            ind[j]=-1;
//            cerr << "bad exit" << endl;
            break;
         }
         //int iter2 = norm2 < 0.5 ? 2 : 1;
         //for(int k = 0; k<iter2; ++k) {
         //   for (int l = 0; l<j; ++l) {
         //      T scal=-cblas_dot<T>(j+1-l,Un+j*L+l,1,Unds+l*L+l,1);
         //      cblas_axpy<T>(l+1,scal,Un+l*L,1,Un+j*L,1);
         //   }
         //}
         Un[j*L+j]=-T(1.0);
         cblas_copy<T>(j,Unds+j,L,Un+j*L,1);
         cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,j,Un,L,Un+j*L,1);
         /// Un is the orthogonalized vectors in the D basis
         T invNorm=1.0/sqrt(norm2);
         cblas_scal<T>(j+1,-invNorm,Un+j*L,1);
         Unds[j*L+j]=cblas_dot<T>(j+1,Un+j*L,1,Gsa+j*L,1);
      }
      // compute the equiangular direction u and its correlations A = Gs'u
      for (int k = 0; k<=j; ++k) u[k]=T(1.0);
      cblas_trmv<T>(CblasColMajor,CblasUpper,CblasTrans,CblasNonUnit,
            j+1,Un,L,u,1);
      T a = T(1.0)/cblas_nrm2<T>(j+1,u,1);
      cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
            j+1,Un,L,u,1);
      cblas_scal<T>(j+1,a,u,1);
      cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),Gs,K,u,1,T(0.0),A,1);
      T potentNorm=0.0;
      if (!ols) {
         for (int k = 0; k<=j; ++k)  potentNorm += Rdn[ind[k]]*u[k];
      }
      // find the smallest step at which an inactive atom joins the active set
      if (pos) {
         for (int k = 0; k<K; ++k) {
            T diff = a-A[k];
            work[k]= diff <= 0 ? INFINITY : (Cmax-Rdn[k])/diff;
         }
         for (int k = 0; k<=j; ++k) {
            work[ind[k]]=INFINITY;
         }
         for (int k = 0; k<K; ++k)
            if (work[k] <=0) work[k]=INFINITY;
         currentInd =cblas_iamin<T>(K,work,1);
      } else {
         memset(work,0,2*K*sizeof(T));
         for (int k = 0; k<=j; ++k) {
            const int index=2*ind[k];
            work[index]=INFINITY;
            work[index+1]=INFINITY;
         }
         // two candidate steps per atom: one per sign of the correlation
         for (int k = 0; k<K; ++k) {
            const int index=2*k;
            if (!work[index]) {
               const T diff1=a-A[k];
               work[index]= diff1 <= 0 ? INFINITY : (Cmax-Rdn[k])/diff1;
               const T diff2=a+A[k];
               work[index+1]=diff2 <= 0 ? INFINITY : (Cmax+Rdn[k])/diff2;
            }
         }
         currentInd =cblas_iamin<T>(2*K,work,1);
      }
      T gamma=work[currentInd];
      T gammaMin=0;
      int minBasis=0;
      //if (j == L-1) gamma=potentNorm;
      if (mode == PENALTY) {
         gamma=MIN(gamma,(Cmax-constraint)/a);
      }
      // also consider the step at which an active coefficient crosses zero
      //if (j > 0) {
         vDiv<T>(j+1,coeffs,u,work);
         cblas_scal<T>(j+1,-T(1.0),work,1);
         /// voir pour petites valeurs
         for (int k=0; k<=j; ++k)
            if (coeffs[k]==0 || work[k] <=0) work[k]=INFINITY;
         minBasis=cblas_iamin<T>(j+1,work,1);
         gammaMin=work[minBasis];
         if (gammaMin < gamma) gamma=gammaMin;
      //}
      if (mode == L1COEFFS) {
         T Tu = 0.0;
         for (int k = 0; k<=j; ++k) Tu += u[k];
         if (Tu > EPSILON)
            gamma= MIN(gamma,(constraint-thrs)/Tu);
         thrs+=gamma*Tu;
      }
      // compute the norm of the residdual
      if (ols == 0) {
         const T t = gamma*gamma - 2*gamma*potentNorm;
         if (t > 0 || isnan(t) || isinf(t)) {
            // cerr << "bad bad exit" << endl;
            // cerr << t << endl;
            ind[j]=-1;
            break;
         }
         normX += t;
      } else {
         // plan the last orthogonal projection
         if (newAtom) {
            RUn[j]=0.0;
            for (int k = 0; k<=j; ++k) RUn[j] += Xdn[ind[k]]*
               Un[j*L+k];
            normX -= RUn[j]*RUn[j];
         }
      }
      // Update the coefficients
      cblas_axpy<T>(j+1,gamma,u,1,coeffs,1);
      if (pos) {
         for (int k = 0; k<j+1; ++k)
            if (coeffs[k] < 0) coeffs[k]=0;
      }
      cblas_axpy<T>(K,-gamma,A,1,Rdn,1);
      if (!pos) currentInd/= 2;
      if (path) {
         for (int k = 0; k<=j; ++k)
            path[iter*K+ind[k]]=coeffs[k]*sig[k];
      }
      if (gamma == gammaMin) {
         // an active coefficient hit zero: downdate the factorization and
         // remove that atom from the active set
         downDateLasso<T>(j,minBasis,normX,ols,pos,Rdnv,ind,coeffs,sigv,
               avv,Xdnv, RUnv, Unm, Gsm, Gsam,Undsm,Rm);
         newAtom=false;
         Cmax=abs<T>(Rdn[ind[0]]);
         --j;
      } else {
         newAtom=true;
      }
      ++iter;
      if (mode == PENALTY) {
         thrs=abs<T>(Rdn[ind[0]]);
      }
      // stopping tests for all modes
      if ((j == L-1) ||
            (mode == PENALTY && (thrs - constraint < 1e-15)) ||
            (mode == L1COEFFS && (thrs - constraint > -1e-15)) ||
            (newAtom && mode == L2ERROR && (normX - constraint < 1e-15)) ||
            (normX < 1e-15) ||
            (iter >= length_path)) {
         // cerr << "exit" << endl;
         // PRINT_F(thrs)
         // PRINT_F(constraint)
         // PRINT_F(normX)
         break;
      }
   }
   if (ols) {
      // least-squares refit on the final active set
      cblas_copy<T>(j+1,RUn,1,coeffs,1);
      cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
            j+1,Un,L,coeffs,1);
   }
   // restore the original signs of the coefficients
   vMul<T>(j+1,coeffs,sig,coeffs);
};
/// Auxiliary function for coreLARS (Cholesky downdate): removes the atom at
/// active-set position minBasis, shifting all later columns of Un / Unds /
/// Gs / Gsa (and the ind / coeffs / sig arrays) down by one, and repairing
/// the triangular factorization via a sequence of rank-one updates
/// accumulated in R. When ols is set, RUn and normX are recomputed for the
/// reduced active set. j is decremented on exit.
template <typename T>
inline void downDateLasso(int& j,int& minBasis,T& normX,const bool ols,
      const bool pos,
      Vector<T>& Rdnv, INTM* ind,
      T* coeffs, Vector<T>& sigv, Vector<T>& avv,
      Vector<T>& Xdnv, Vector<T>& RUnv,Matrix<T>& Unm, Matrix<T>& Gsm,
      Matrix<T>& Gsam, Matrix<T>& Undsm, Matrix<T>& Rm) {
   const int L = Gsm.n();
   const int K = Gsm.m();
   // raw pointers for unsafe fast low level access
   T* const Rdn = Rdnv.rawX();
   T* const Xdn = Xdnv.rawX();
   T* const sig = sigv.rawX();
   T* const av = avv.rawX();
   T* const RUn = RUnv.rawX();
   T* const Un = Unm.rawX();
   T* const Unds = Undsm.rawX();
   T* const Gs = Gsm.rawX();
   T* const Gsa = Gsam.rawX();
   T* const R = Rm.rawX();
   int indB=ind[minBasis];
   // undo the sign flip that was applied when the atom entered the set
   if (!pos && sig[minBasis] < 0) {
      // Update Rdn
      Rdn[indB]=-Rdn[indB];
      if (ols) Xdn[indB]=-Xdn[indB];
   }
   // num = number of active atoms after the removed one
   int num=j-minBasis;
   for (int k = 0; k<num*num;++k) R[k]=0.0;
   for (int k = 0; k<num; ++k) R[k*num+k]=1.0;
   // Update Un
   for (int k = minBasis+1; k<=j; ++k) {
      T a = -Un[k*L+minBasis]/Un[minBasis*L+minBasis];
      av[k-minBasis-1] = a;
      cblas_axpy<T>(minBasis,a,Un+minBasis*L,1,Un+k*L,1);
   }
   // shift columns after minBasis down by one
   for (int k = minBasis+1; k<=j; ++k) {
      cblas_copy<T>(minBasis,Un+k*L,1,Un+(k-1)*L,1);
      cblas_copy<T>(num,Un+k*L+minBasis+1,1,Un+(k-1)*L+minBasis,1);
   }
   // build the correction factor R from the elimination coefficients av
   T alpha=1.0;
   T alphab,gamma;
   for (int k = 0; k<num; ++k) {
      alphab=alpha+av[k]*av[k];
      R[k*num+k]=sqrt(alphab/alpha);
      gamma=av[k]*R[k*num+k]/alphab;
      alpha=alphab;
      cblas_copy<T>(num-k-1,av+k+1,1,R+k*num+k+1,1);
      cblas_scal<T>(num-k-1,gamma,R+k*num+k+1,1);
   }
   if (num > 0) {
      // apply the inverse correction to the shifted part of Un
      trtri<T>(low,nonUnit,num,R,num);
      cblas_trmm<T>(CblasColMajor,CblasRight,CblasLower,CblasTrans,CblasNonUnit,
            j,num,T(1.0),R,num,Un+minBasis*L,L);
   }
   // Update Unds
   for (int k = minBasis+1; k<=j; ++k)
      cblas_axpy<T>(j-minBasis,av[k-minBasis-1],Unds+minBasis*L+minBasis+1,1,
            Unds+k*L+minBasis+1,1);
   for (int k = 0; k<minBasis; ++k)
      for (int l = minBasis+1; l<=j; ++l)
         Unds[k*L+l-1]=Unds[k*L+l];
   for (int k = minBasis+1; k<=j; ++k)
      cblas_copy<T>(j-minBasis,Unds+k*L+minBasis+1,1,Unds+(k-1)*L+minBasis,1);
   if (num > 0)
      cblas_trmm<T>(CblasColMajor,CblasRight,CblasLower,CblasTrans,CblasNonUnit,
            j-minBasis,num,T(1.0),R,num,Unds+minBasis*L+minBasis,L);
   // restore strict upper-triangular structure
   for (int k = minBasis+1; k<=j; ++k)
      for (int l = 0; l<k; ++l) Unds[k*L+l]=0.0;
   // Update Gs
   for (int k = minBasis+1; k<=j; ++k) {
      cblas_copy<T>(K,Gs+k*K,1,Gs+(k-1)*K,1);
   }
   if (!pos && sig[minBasis] < T(0.0)) cblas_scal<T>(j,T(-1.0),Gs+indB,K);
   // Update Gsa
   for (int k = minBasis+1; k<=j; ++k) {
      cblas_copy<T>(minBasis,Gsa+k*L,1,Gsa+(k-1)*L,1);
      cblas_copy<T>(j-minBasis,Gsa+k*L+minBasis+1,1,Gsa+(k-1)*L+minBasis,1);
   }
   for (int k = 0; k<minBasis; ++k) {
      for (int l = minBasis+1; l<=j; ++l) Gsa[k*L+l-1]=Gsa[k*L+l];
   }
   // Update sig
   for (int k = minBasis+1; k<=j && !pos; ++k) sig[k-1]=sig[k];
   // Update ind
   for (int k = minBasis+1; k<=j; ++k) ind[k-1]=ind[k];
   ind[j]=-1;
   for (int k = minBasis+1; k<=j; ++k) coeffs[k-1]=coeffs[k];
   coeffs[j]=0.0;
   if (ols) {
      // Update RUn and normX
      for (int k = minBasis; k<=j; ++k)
         normX += RUn[k]*RUn[k];
      for (int k = minBasis; k<j; ++k) {
         RUn[k]=0.0;
         for (int l = 0; l<=k; ++l) RUn[k] += Xdn[ind[l]]*
            Un[k*L+l];
         normX -= RUn[k]*RUn[k];
      }
   }
   // Update j
   --j;
}
/// Reweighted-l1 Lasso: runs a standard lasso pass (coreLARS2, the
/// matrix-inversion-lemma implementation), then iteratively re-solves with
/// per-atom weights derived from the previous solution's magnitudes
/// (smaller coefficients get larger weights), annealed over iterR rounds
/// via the sigma schedule.
template <typename T>
void lassoReweighted(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
      int L, const T constraint, constraint_type mode, const bool pos,
      const T sigma,
      const int numThreads) {
   spalpha.clear();
   const int M = X.n();
   const int K = D.n();
   Matrix<T> vM;
   Matrix<int> rM;
   vM.resize(L,M);
   rM.resize(L,M);
   const int iterR = 30;   // number of reweighting rounds
   if (L <= 0) return;
   int NUM_THREADS=init_omp(numThreads);
   //ProdMatrix<T> G(D, K < 25000 && M > 10);
   ProdMatrix<T> G(D, K < 50000);
   //Matrix<T> G;
   //D.XtX(G);
   // tiny diagonal regularization for numerical stability
   G.addDiag(1e-10);
   // per-thread scratch buffers for coreLARS2/coreLARS2W
   Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
   Vector<T>* DtRRT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Vector<T>* weightsT=new Vector<T>[NUM_THREADS];
   Vector<int>* inddT=new Vector<int>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DtRT[i].resize(K);
      DtRRT[i].resize(K);
      uT[i].resize(K);
      weightsT[i].resize(K);
      GT[i].resize(K,K);
      inddT[i].resize(K);
      GsT[i].resize(L,L);
      invGsT[i].resize(L,L);
      GaT[i].resize(K,L);
      workT[i].resize(K,3);
      workT[i].setZeros();
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      X.refCol(i,Xi);
      T normXo = Xi.nrm2sq();
      T normX = normXo;
      Vector<int> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      Vector<T>& DtR=DtRT[numT];
      Vector<T>& DtRR = DtRRT[numT];
      // initial correlations D'X_i, preserved across reweighting rounds
      D.multTrans(Xi,DtR);
      DtRR.copy(DtR);
      // unweighted first pass
      coreLARS2(DtRR,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,
            ind,workT[numT],normX,mode,constraint,pos);
      //Matrix<T>& GG = GT[numT];
      Vector<T>& weights = weightsT[numT];
      //Vector<int>& indd = inddT[numT];
      for (int j = 0; j<iterR; ++j) {
         // annealed scale: grows towards sigma as j increases
         const T sig = sigma*pow(0.7,iterR-1-j);
         weights.set(sig);
         // atoms active in the previous solution are down-weighted
         // exponentially in their coefficient magnitude
         for (int k = 0; k<K; ++k) {
            if (ind[k] != -1) {
               weights[ind[k]] = MAX(1e-4,sig*exp(-sig*abs<T>(coeffs[k])));
            } else {
               break;
            }
         }
         // restart from the original correlations and residual energy
         DtRR.copy(DtR);
         normX=normXo;
         coreLARS2W(DtRR,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,weights,
               ind,workT[numT],normX,mode,constraint,pos);
      }
   }
   delete[](DtRT);
   delete[](DtRRT);
   delete[](inddT);
   delete[](uT);
   delete[](weightsT);
   delete[](GsT);
   delete[](GT);
   delete[](GaT);
   delete[](invGsT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
}
/// Weighted Lasso: for each column x_i of X, solves a LARS problem where
/// atom j is penalized by weights(j,i). The per-signal sparse codes are
/// gathered into spalpha.
template <typename T>
void lassoWeight(const Matrix<T>& X, const Matrix<T>& D, const Matrix<T>& weights,
      SpMatrix<T>& spalpha,
      int L, const T constraint, constraint_type mode, const bool pos,
      const int numThreads) {
   spalpha.clear();
   const int M = X.n();   // number of signals
   const int K = D.n();   // number of dictionary atoms
   // Dense L x M staging storage: coefficient values and atom indices.
   Matrix<T> vM;
   Matrix<INTM> rM;
   vM.resize(L,M);
   rM.resize(L,M);
   if (L <= 0) return;
   int NUM_THREADS=init_omp(numThreads);
   //ProdMatrix<T> G(D, K < 25000 && M > 10);
   // Gram matrix D'D, fully cached unless the dictionary is very large.
   ProdMatrix<T> G(D, K < 50000);
   //Matrix<T> G;
   //D.XtX(G);
   G.addDiag(1e-10);   // numerical stabilization of the diagonal
   // Per-thread scratch buffers for coreLARS2W.
   Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DtRT[i].resize(K);
      uT[i].resize(K);
      uT[i].setZeros();
      GsT[i].resize(L,L);
      invGsT[i].resize(L,L);
      GaT[i].resize(K,L);
      workT[i].resize(K,3);
      workT[i].setZeros();
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      X.refCol(i,Xi);
      T normX = Xi.nrm2sq();
      // Views into the staging matrices for this signal's result.
      Vector<INTM> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      Vector<T>& DtR=DtRT[numT];
      D.multTrans(Xi,DtR);   // initial correlations D'x_i
      Vector<T> we;
      weights.refCol(i,we);  // per-atom penalty weights for this signal
      coreLARS2W(DtR,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,we,
            ind,workT[numT],normX,mode,constraint,pos);
   }
   delete[](DtRT);
   delete[](uT);
   delete[](GsT);
   delete[](GaT);
   delete[](invGsT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
/// Weighted Lasso with precomputed quantities: same as lassoWeight, but the
/// Gram matrix G = D'D and the correlations DtR = D'X are supplied by the
/// caller instead of being derived from a dictionary (X is only used for
/// its column count and column norms).
template <typename T>
void lassoWeightPreComputed(const Matrix<T>& X, const Matrix<T>& G, const Matrix<T>& DtR, const Matrix<T>& weights,
      SpMatrix<T>& spalpha,
      int L, const T constraint, constraint_type mode, const bool pos,
      const int numThreads) {
   spalpha.clear();
   const int M = X.n();   // number of signals
   const int K = G.n();   // number of dictionary atoms
   // Dense L x M staging storage: coefficient values and atom indices.
   Matrix<T> vM;
   Matrix<int> rM;
   vM.resize(L,M);
   rM.resize(L,M);
   if (L <= 0) return;
   int NUM_THREADS=init_omp(numThreads);
   // Per-thread scratch buffers for coreLARS2W.
   Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DtRT[i].resize(K);
      uT[i].resize(K);
      uT[i].setZeros();
      GsT[i].resize(L,L);
      invGsT[i].resize(L,L);
      GaT[i].resize(K,L);
      workT[i].resize(K,3);
      workT[i].setZeros();
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      X.refCol(i,Xi);
      T normX = Xi.nrm2sq();
      // Views into the staging matrices for this signal's result.
      Vector<int> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      Vector<T>& DtRi=DtRT[numT];
      DtR.copyCol(i,DtRi);   // private copy: the solver updates it in place
      Vector<T> we;
      weights.refCol(i,we);  // per-atom penalty weights for this signal
      coreLARS2W(DtRi,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,we,
            ind,workT[numT],normX,mode,constraint,pos);
   }
   delete[](DtRT);
   delete[](uT);
   delete[](GsT);
   delete[](GaT);
   delete[](invGsT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
/// second implementation using matrix inversion lemma
/// Lasso with missing data: mask(:,i) selects which entries of x_i are
/// observed. Fully-observed columns use the shared Gram matrix; partially
/// observed ones re-extract the masked dictionary/signal and rebuild a
/// per-thread Gram matrix. lambda2 adds an elastic-net diagonal term.
template <typename T>
void lasso_mask(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha, const Matrix<bool>& mask,
      int L, const T constraint,const T lambda2, constraint_type mode, const bool pos,
      const int numThreads) {
   spalpha.clear();
   const int M = X.n();   // number of signals
   const int K = D.n();   // number of dictionary atoms
   // Dense L x M staging storage: coefficient values and atom indices.
   Matrix<T> vM;
   Matrix<INTM> rM;
   vM.resize(L,M);
   rM.resize(L,M);
   if (L <= 0) return;
   int NUM_THREADS=init_omp(numThreads);
   // Shared Gram matrix for the fully-observed case.
   ProdMatrix<T> G(D,K < 25000 && M > 10);
   G.addDiag(MAX(lambda2,1e-10));
   // Per-thread scratch: LARS buffers plus masked copies of D and x_i.
   Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Vector<T>* XmaskT=new Vector<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   ProdMatrix<T>* GT=new ProdMatrix<T>[NUM_THREADS];
   Matrix<T>* DmaskT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DmaskT[i].resize(D.m(),D.n());
      DtRT[i].resize(K);
      uT[i].resize(K);
      XmaskT[i].resize(X.m());
      uT[i].setZeros();
      GsT[i].resize(L,L);
      invGsT[i].resize(L,L);
      GaT[i].resize(K,L);
      workT[i].resize(K,3);
      workT[i].setZeros();
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      X.refCol(i,Xi);
      Vector<bool> maski;
      mask.refCol(i,maski);
      Vector<INTM> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      Vector<T>& DtR=DtRT[numT];
      // Nothing observed: leave this column empty.
      if (maski.allfalse()) continue;
      if (maski.alltrue()) {
         // Fully observed: standard LARS with the shared Gram matrix.
         T normX = Xi.nrm2sq();
         D.multTrans(Xi,DtR);
         coreLARS2(DtR,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,
               ind,workT[numT],normX,mode,constraint,pos);
      } else {
         // Partially observed: restrict D and x_i to observed rows.
         D.copyMask(DmaskT[numT],maski);
         Xi.copyMask(XmaskT[numT],maski);
         // Rescale the constraint in proportion to the number of observed
         // entries (only meaningful for PENALTY / L2ERROR modes).
         T constraint_mask = mode == PENALTY || mode == L2ERROR ? constraint*XmaskT[numT].n()/Xi.n() : constraint;
         T normX = XmaskT[numT].nrm2sq();
         DmaskT[numT].multTrans(XmaskT[numT],DtR);
         GT[numT].setMatrices(DmaskT[numT],false);
         GT[numT].addDiag(MAX(lambda2,T(1e-10)));
         coreLARS2(DtR,GT[numT],
               GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,
               ind,workT[numT],normX,mode,constraint_mask,pos);
         // Restore the full buffer dimensions shrunk by copyMask, so the
         // scratch matrices can be reused by the next signal.
         DmaskT[numT].setm(D.m());
         DmaskT[numT].setn(D.n());
         XmaskT[numT].setn(X.m());
      }
   }
   delete[](GT);
   delete[](XmaskT);
   delete[](DmaskT);
   delete[](DtRT);
   delete[](uT);
   delete[](GsT);
   delete[](GaT);
   delete[](invGsT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
template <typename T>
void lasso2(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
int L, const T constraint, const T lambda2, constraint_type mode, const bool pos,
const int numThreads, Matrix<T>* path, int length_path) {
ProdMatrix<T> G(D,X.n() > 10 && D.n() < 50000);
ProdMatrix<T> DtX(D,X,false);
G.addDiag(MAX(lambda2,1e-10));
lasso2(X,G,DtX,spalpha,L,constraint,mode,pos,numThreads,path, length_path);
}
/// Generic Lasso solver on precomputed quantities: G = D'D (+ diagonal
/// term) and DtX = D'X. For each signal, runs coreLARS2 and stores the
/// sparse result in spalpha. If path != NULL, the regularization path of
/// the FIRST signal only is recorded (up to length_path steps).
template <typename T>
void lasso2(const Data<T>& X, const AbstractMatrix<T>& G, const AbstractMatrix<T>& DtX,
      SpMatrix<T>& spalpha,
      int L, const T constraint, constraint_type mode, const bool pos,
      const int numThreads, Matrix<T>* path, int length_path) {
   spalpha.clear();
   const INTM M = X.n();   // number of signals
   const INTM K = G.n();   // number of dictionary atoms
   // Dense L x M staging storage: coefficient values and atom indices.
   Matrix<T> vM;
   Matrix<INTM> rM;
   vM.resize(L,M);
   rM.resize(L,M);
   if (L <= 0) return;
   if (path) path->setZeros();
   int NUM_THREADS=init_omp(numThreads);
   // Per-thread scratch buffers for coreLARS2.
   Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DtRT[i].resize(K);
      uT[i].resize(K);
      uT[i].setZeros();
      GsT[i].resize(L,L);
      invGsT[i].resize(L,L);
      GaT[i].resize(K,L);
      workT[i].resize(K,3);
      workT[i].setZeros();
   }
   INTM i;
   // Squared l2 norms of all signals, computed once before the parallel loop.
   Vector<T> norms;
   X.norm_2sq_cols(norms);
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      //   Vector<T> Xi;
      //   X.refCol(i,Xi);
      //   T normX = Xi.nrm2sq();
      T normX = norms[i];
      // Views into the staging matrices for this signal's result.
      Vector<INTM> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      Vector<T>& DtR=DtRT[numT];
      DtX.copyCol(i,DtR);   // private copy: the solver updates it in place
      //D.multTrans(Xi,DtR);
      coreLARS2(DtR,G,GsT[numT],GaT[numT],invGsT[numT],
            uT[numT],coeffs,
            ind,workT[numT],normX,mode,constraint,pos,
            path && i==0 ? path->rawX() : NULL,length_path);
   }
   delete[](DtRT);
   delete[](uT);
   delete[](GsT);
   delete[](GaT);
   delete[](invGsT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
/// Convenience overload of coreLARS2W: allocates every scratch buffer
/// locally (sized by the full problem dimension, so the path may select
/// every atom) and scatters the sparse LARS-W solution back into the dense
/// vector coeffs.
template <typename T>
void coreLARS2W(Vector<T>& DtR, const AbstractMatrix<T>& G,
      Vector<T>& coeffs, const Vector<T>& weights, T normX,
      const constraint_type mode,
      const T constraint, const bool pos) {
   const INTM n = G.m();
   // Sparse solution storage (value/index pairs) and solver scratch space.
   Vector<T> vals;
   vals.resize(n);
   Vector<INTM> inds;
   inds.resize(n);
   Vector<T> scratch;
   scratch.resize(n);
   Matrix<T> activeGram;
   activeGram.resize(n,n);
   Matrix<T> activeGramInv;
   activeGramInv.resize(n,n);
   Matrix<T> activeCols;
   activeCols.resize(n,n);
   Matrix<T> work;
   work.resize(n,3);
   coreLARS2W(DtR,G,activeGram,activeCols,activeGramInv,scratch,vals,weights,
         inds,work,normX,mode,constraint,pos);
   // Expand the sparse (index,value) pairs into the dense output;
   // a negative index marks the end of the active set.
   coeffs.setZeros();
   for (INTM k = 0; k < n; ++k) {
      if (inds[k] < 0) break;
      coeffs[inds[k]] = vals[k];
   }
};
/// Convenience overload of coreLARS2: allocates every scratch buffer
/// locally (sized by the full problem dimension, so the path may select
/// every atom) and scatters the sparse LARS solution back into the dense
/// vector coeffs.
template <typename T>
void coreLARS2(Vector<T>& DtR, const AbstractMatrix<T>& G,
      Vector<T>& coeffs, T normX,
      const constraint_type mode,
      const T constraint, const bool pos) {
   const INTM n = G.m();
   // Sparse solution storage (value/index pairs) and solver scratch space.
   Vector<T> vals;
   vals.resize(n);
   Vector<INTM> inds;
   inds.resize(n);
   Vector<T> scratch;
   scratch.resize(n);
   Matrix<T> activeGram;
   activeGram.resize(n,n);
   Matrix<T> activeGramInv;
   activeGramInv.resize(n,n);
   Matrix<T> activeCols;
   activeCols.resize(n,n);
   Matrix<T> work;
   work.resize(n,3);
   coreLARS2(DtR,G,activeGram,activeCols,activeGramInv,scratch,vals,
         inds,work,normX,mode,constraint,pos);
   // Expand the sparse (index,value) pairs into the dense output;
   // a negative index marks the end of the active set.
   coeffs.setZeros();
   for (INTM k = 0; k < n; ++k) {
      if (inds[k] < 0) break;
      coeffs[inds[k]] = vals[k];
   }
};
/// Auxiliary function for lasso
/// Core LARS / homotopy solver. On entry DtR = D'x (correlations) and
/// normX = ||x||^2; both are updated in place while following the
/// regularization path. Gs/Ga/invGs/u/work are caller-provided scratch
/// (see lasso2 for their sizing). On exit ind[j]/coeffs[j] hold the atom
/// index and value of the j-th active coefficient (ind[j] == -1 marks
/// unused slots). If path != NULL, intermediate solutions are recorded
/// (one column of K entries per iteration, up to length_path).
template <typename T>
void coreLARS2(Vector<T>& DtR, const AbstractMatrix<T>& G,
      Matrix<T>& Gs,
      Matrix<T>& Ga,
      Matrix<T>& invGs,
      Vector<T>& u,
      Vector<T>& coeffs,
      Vector<INTM>& ind,
      Matrix<T>& work,
      T& normX,
      const constraint_type mode,
      const T constraint,
      const bool pos,
      T* path, int length_path) {
   const int LL = Gs.n();         // leading dimension of the active-set buffers
   const int K = G.n();           // total number of atoms
   const int L = MIN(LL,K);       // maximum size of the active set
   if (length_path <= 1) length_path=4*L;
   coeffs.setZeros();
   ind.set(-1);
   T* const pr_Gs = Gs.rawX();
   T* const pr_invGs = invGs.rawX();
   T* const pr_Ga = Ga.rawX();
   T* const pr_work = work.rawX();
   T* const pr_u = u.rawX();
   T* const pr_DtR = DtR.rawX();
   T* const pr_coeffs = coeffs.rawX();
   INTM* const pr_ind = ind.rawX();
   // Find the most correlated element
   int currentInd = pos ? DtR.max() : DtR.fmax();
   // Early exit if the optimality conditions already hold at zero.
   if (mode == PENALTY && abs(DtR[currentInd]) < constraint) return;
   if (mode == L2ERROR && normX < constraint) return;
   bool newAtom=true;
   int i;
   int iter=0;
   T thrs = 0;   // running l1 norm of the coefficients
   for (i = 0; i<L; ++i) {
      ++iter;
      if (newAtom) {
         // Add atom currentInd to the active set: extract its Gram column
         // into Ga and the active-set cross-products into Gs.
         pr_ind[i]=currentInd;
         // cerr << "Add " << currentInd << endl;
         G.extract_rawCol(pr_ind[i],pr_Ga+i*K);
         for (int j = 0; j<=i; ++j)
            pr_Gs[i*LL+j]=pr_Ga[i*K+pr_ind[j]];
         // Update inverse of Gs
         if (i == 0) {
            pr_invGs[0]=T(1.0)/pr_Gs[0];
         } else {
            // Rank-one (Schur-complement) update of invGs after appending
            // a row/column to Gs.
            cblas_symv<T>(CblasColMajor,CblasUpper,i,T(1.0),
                  pr_invGs,LL,pr_Gs+i*LL,1,T(0.0),pr_u,1);
            const T schur =
               T(1.0)/(pr_Gs[i*LL+i]-cblas_dot<T>(i,pr_u,1,pr_Gs+i*LL,1));
            pr_invGs[i*LL+i]=schur;
            // cblas_copy<T>(i,pr_u,1,pr_invGs+i*LL,1);
            memcpy(pr_invGs+i*LL,pr_u,i*sizeof(T));
            cblas_scal<T>(i,-schur,pr_invGs+i*LL,1);
            cblas_syr<T>(CblasColMajor,CblasUpper,i,schur,pr_u,1,
                  pr_invGs,LL);
         }
      }
      // Compute the path direction
      // u = invGs * sign(DtR restricted to the active set).
      for (int j = 0; j<=i; ++j)
         pr_work[j]= pr_DtR[pr_ind[j]] > 0 ? T(1.0) : T(-1.0);
      cblas_symv<T>(CblasColMajor,CblasUpper,i+1,T(1.0),pr_invGs,LL,
            pr_work,1,T(0.0),pr_u,1);
      // Compute the step on the path
      // step_max: first step at which an active coefficient crosses zero.
      T step_max = INFINITY;
      int first_zero = -1;
      for (int j = 0; j<=i; ++j) {
         T ratio = -pr_coeffs[j]/pr_u[j];
         if (ratio > 0 && ratio <= step_max) {
            step_max=ratio;
            first_zero=j;
         }
      }
      //  PRINT_F(step_max)
      T current_correlation = abs<T>(pr_DtR[pr_ind[0]]);
      // work[2K..3K) = Ga*u; duplicated into work[0..K) and work[K..2K)
      // to evaluate both signs of the next-atom entry condition.
      cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,i+1,T(1.0),pr_Ga,
            K,pr_u,1,T(0.0),pr_work+2*K,1);
      memcpy(pr_work+K,pr_work+2*K,K*sizeof(T));
      memcpy(pr_work,pr_work+K,K*sizeof(T));
      //  cblas_copy<T>(K,pr_work+2*K,1,pr_work+K,1);
      //  cblas_copy<T>(K,pr_work+2*K,1,pr_work,1);
      // Active atoms can never re-enter: exclude them.
      for (int j = 0; j<=i; ++j) {
         pr_work[pr_ind[j]]=INFINITY;
         pr_work[pr_ind[j]+K]=INFINITY;
      }
      // Step at which each inactive atom's correlation (negative side)
      // catches up with the current maximal correlation.
      for (int j = 0; j<K; ++j) {
         pr_work[j] = ((pr_work[j] < INFINITY) && (pr_work[j] > T(-1.0))) ? (pr_DtR[j]+current_correlation)/(T(1.0)+pr_work[j]) : INFINITY;
      }
      //  work.print("work");
      // Same for the positive side.
      for (int j = 0; j<K; ++j) {
         pr_work[j+K] = ((pr_work[j+K] < INFINITY) && (pr_work[j+K] < T(1.0))) ? (current_correlation-pr_DtR[j])/(T(1.0)-pr_work[j+K]) : INFINITY;
      }
      //  work.print("work");
      if (pos) {
         // Non-negativity: only positive-side entries are admissible.
         for (int j = 0; j<K; ++j) {
            pr_work[j]=INFINITY;
         }
      }
      //  work.print("work");
      //  coeffs.print("coeffs");
      // Smallest admissible step and the atom that triggers it.
      int index = cblas_iamin<T>(2*K,pr_work,1);
      T step = pr_work[index];
      // Choose next element
      currentInd = index % K;
      // compute the coefficients of the polynome representing normX^2
      T coeff1 = 0;
      for (int j = 0; j<=i; ++j)
         coeff1 += pr_DtR[pr_ind[j]] > 0 ? pr_u[j] : -pr_u[j];
      T coeff2 = 0;
      for (int j = 0; j<=i; ++j)
         coeff2 += pr_DtR[pr_ind[j]]*pr_u[j];
      T coeff3 = normX-constraint;
      // step_max2: step at which the stopping criterion of the chosen mode
      // would be met exactly.
      T step_max2;
      if (mode == PENALTY) {
         step_max2 = current_correlation-constraint;
      } else if (mode == L2ERROR) {
         /// L2ERROR
         const T delta = coeff2*coeff2-coeff1*coeff3;
         step_max2 = delta < 0 ? INFINITY : (coeff2-sqrt(delta))/coeff1;
         step_max2 = MIN(current_correlation,step_max2);
      } else {
         /// L1COEFFS
         step_max2 = coeff1 < 0 ? INFINITY : (constraint-thrs)/coeff1;
         step_max2 = MIN(current_correlation,step_max2);
      }
      step = MIN(MIN(step,step_max2),step_max);
      if (step == INFINITY) break; // stop the path
      // Update coefficients
      cblas_axpy<T>(i+1,step,pr_u,1,pr_coeffs,1);
      if (pos) {
         for (int j = 0; j<i+1; ++j)
            if (pr_coeffs[j] < 0) pr_coeffs[j]=0;
      }
      // Update correlations
      cblas_axpy<T>(K,-step,pr_work+2*K,1,pr_DtR,1);
      // Update normX
      normX += coeff1*step*step-2*coeff2*step;
      // Update norm1
      thrs += step*coeff1;
      if (path) {
         for (int k = 0; k<=i; ++k)
            path[iter*K+ind[k]]=pr_coeffs[k];
      }
      // Choose next action
      if (step == step_max) {
         //   cerr << "Remove " << pr_ind[first_zero] << endl;
         /// Downdate, remove first_zero
         /// Downdate Ga, Gs, invGs, ind, coeffs
         for (int j = first_zero; j<i; ++j) {
            cblas_copy<T>(K,pr_Ga+(j+1)*K,1,pr_Ga+j*K,1);
            pr_ind[j]=pr_ind[j+1];
            pr_coeffs[j]=pr_coeffs[j+1];
         }
         pr_ind[i]=-1;
         pr_coeffs[i]=0;
         // Shift the rows/columns of Gs past the removed index.
         for (int j = first_zero; j<i; ++j) {
            cblas_copy<T>(first_zero,pr_Gs+(j+1)*LL,1,pr_Gs+j*LL,1);
            cblas_copy<T>(i-first_zero,pr_Gs+(j+1)*LL+first_zero+1,1,
                  pr_Gs+j*LL+first_zero,1);
         }
         // Rank-one downdate of invGs (inverse Schur complement).
         const T schur = pr_invGs[first_zero*LL+first_zero];
         cblas_copy<T>(first_zero,pr_invGs+first_zero*LL,1,pr_u,1);
         cblas_copy<T>(i-first_zero,pr_invGs+(first_zero+1)*LL+first_zero,LL,
               pr_u+first_zero,1);
         for (int j = first_zero; j<i; ++j) {
            cblas_copy<T>(first_zero,pr_invGs+(j+1)*LL,1,pr_invGs+j*LL,1);
            cblas_copy<T>(i-first_zero,pr_invGs+(j+1)*LL+first_zero+1,1,
                  pr_invGs+j*LL+first_zero,1);
         }
         cblas_syr<T>(CblasColMajor,CblasUpper,i,T(-1.0)/schur,
               pr_u,1,pr_invGs,LL);
         // Replay iteration i-1 with the shrunken active set.
         newAtom=false;
         i=i-2;
      } else {
         newAtom=true;
      }
      // Stopping tests: path length, tiny/terminal step, vanished residual,
      // full active set, or the mode's constraint is (nearly) met.
      if ((iter >= length_path-1) || abs(step) < 1e-15 ||
            step == step_max2 || (normX < 1e-15) ||
            (i == (L-1)) ||
            (mode == L2ERROR && normX - constraint < 1e-15) ||
            (mode == L1COEFFS && (constraint-thrs < 1e-15))) {
         break;
      }
   }
}
/// Auxiliary function for lasso
/// Weighted variant of coreLARS2: atom j carries penalty weight weights[j].
/// DtR is divided by the weights up front so the atom-selection logic can
/// reuse the unweighted machinery; the weights reappear in the direction
/// vector and in the normX / l1-norm update coefficients. No path recording
/// and no explicit positivity clamping (pos only affects atom selection).
template <typename T>
void coreLARS2W(Vector<T>& DtR, const AbstractMatrix<T>& G,
      Matrix<T>& Gs,
      Matrix<T>& Ga,
      Matrix<T>& invGs,
      Vector<T>& u,
      Vector<T>& coeffs,
      const Vector<T>& weights,
      Vector<INTM>& ind,
      Matrix<T>& work,
      T& normX,
      const constraint_type mode,
      const T constraint,
      const bool pos) {
   const int LL = Gs.n();      // leading dimension of the active-set buffers
   const int K = G.n();        // total number of atoms
   const int L = MIN(LL,K);    // maximum size of the active set
   coeffs.setZeros();
   ind.set(-1);
   T* const pr_Gs = Gs.rawX();
   T* const pr_invGs = invGs.rawX();
   T* const pr_Ga = Ga.rawX();
   //T* const pr_G = G.rawX();
   T* const pr_work = work.rawX();
   T* const pr_u = u.rawX();
   T* const pr_DtR = DtR.rawX();
   T* const pr_coeffs = coeffs.rawX();
   T* const pr_weights = weights.rawX();
   INTM* const pr_ind = ind.rawX();
   // Work with weight-normalized correlations throughout.
   DtR.div(weights);
   // Find the most correlated element
   int currentInd = pos ? DtR.max() : DtR.fmax();
   // Early exit if the optimality conditions already hold at zero.
   if (mode == PENALTY && abs(DtR[currentInd]) < constraint) return;
   if (mode == L2ERROR && normX < constraint) return;
   bool newAtom=true;
   int i;
   int iter=0;
   T thrs = 0;   // running (weighted) l1 norm of the coefficients
   for (i = 0; i<L; ++i) {
      ++iter;
      if (newAtom) {
         pr_ind[i]=currentInd;
         // Update upper part of Gs and Ga
         G.extract_rawCol(pr_ind[i],pr_Ga+i*K);
         for (int j = 0; j<=i; ++j)
            pr_Gs[i*LL+j]=pr_Ga[i*K+pr_ind[j]];
         // Update inverse of Gs
         if (i == 0) {
            pr_invGs[0]=T(1.0)/pr_Gs[0];
         } else {
            // Rank-one (Schur-complement) update of invGs after appending
            // a row/column to Gs.
            cblas_symv<T>(CblasColMajor,CblasUpper,i,T(1.0),
                  pr_invGs,LL,pr_Gs+i*LL,1,T(0.0),pr_u,1);
            const T schur =
               T(1.0)/(pr_Gs[i*LL+i]-cblas_dot<T>(i,pr_u,1,pr_Gs+i*LL,1));
            pr_invGs[i*LL+i]=schur;
            cblas_copy<T>(i,pr_u,1,pr_invGs+i*LL,1);
            cblas_scal<T>(i,-schur,pr_invGs+i*LL,1);
            cblas_syr<T>(CblasColMajor,CblasUpper,i,schur,pr_u,1,
                  pr_invGs,LL);
         }
      }
      // Compute the path direction
      // u = invGs * (weights .* sign(DtR)) on the active set.
      for (int j = 0; j<=i; ++j)
         pr_work[j]= pr_DtR[pr_ind[j]] > 0 ? weights[pr_ind[j]] : -weights[pr_ind[j]];
      cblas_symv<T>(CblasColMajor,CblasUpper,i+1,T(1.0),pr_invGs,LL,
            pr_work,1,T(0.0),pr_u,1);
      // Compute the step on the path
      // step_max: first step at which an active coefficient crosses zero.
      T step_max = INFINITY;
      int first_zero = -1;
      for (int j = 0; j<=i; ++j) {
         T ratio = -pr_coeffs[j]/pr_u[j];
         if (ratio > 0 && ratio <= step_max) {
            step_max=ratio;
            first_zero=j;
         }
      }
      T current_correlation = abs<T>(pr_DtR[pr_ind[0]]);
      // work[2K..3K) = (Ga*u) ./ weights; duplicated to evaluate the entry
      // condition on both signs.
      cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,i+1,T(1.0),pr_Ga,
            K,pr_u,1,T(0.0),pr_work+2*K,1);
      vDiv<T>(K,pr_work+2*K,pr_weights,pr_work+2*K);
      cblas_copy<T>(K,pr_work+2*K,1,pr_work+K,1);
      cblas_copy<T>(K,pr_work+2*K,1,pr_work,1);
      // Active atoms can never re-enter: exclude them.
      for (int j = 0; j<=i; ++j) {
         pr_work[pr_ind[j]]=INFINITY;
         pr_work[pr_ind[j]+K]=INFINITY;
      }
      // Entry steps on the negative side...
      for (int j = 0; j<K; ++j) {
         pr_work[j] = ((pr_work[j] < INFINITY) && (pr_work[j] > T(-1.0))) ? (pr_DtR[j]+current_correlation)/(T(1.0)+pr_work[j]) : INFINITY;
      }
      // ...and the positive side.
      for (int j = 0; j<K; ++j) {
         pr_work[j+K] = ((pr_work[j+K] < INFINITY) && (pr_work[j+K] < T(1.0))) ? (current_correlation-pr_DtR[j])/(T(1.0)-pr_work[j+K]) : INFINITY;
      }
      if (pos) {
         // Non-negativity: only positive-side entries are admissible.
         for (int j = 0; j<K; ++j) {
            pr_work[j]=INFINITY;
         }
      }
      // Smallest admissible step and the atom that triggers it.
      int index = cblas_iamin<T>(2*K,pr_work,1);
      T step = pr_work[index];
      // Choose next element
      currentInd = index % K;
      // compute the coefficients of the polynome representing normX^2
      T coeff1 = 0;
      for (int j = 0; j<=i; ++j)
         coeff1 += pr_DtR[pr_ind[j]] > 0 ? pr_weights[pr_ind[j]]*pr_u[j] :
            -pr_weights[pr_ind[j]]*pr_u[j];
      T coeff2 = 0;
      for (int j = 0; j<=i; ++j)
         coeff2 += pr_DtR[pr_ind[j]]*pr_u[j]*pr_weights[pr_ind[j]];
      T coeff3 = normX-constraint;
      // step_max2: step at which the mode's stopping criterion is met.
      T step_max2;
      if (mode == PENALTY) {
         step_max2 = current_correlation-constraint;
      } else if (mode == L2ERROR) {
         /// L2ERROR
         const T delta = coeff2*coeff2-coeff1*coeff3;
         step_max2 = delta < 0 ? INFINITY : (coeff2-sqrt(delta))/coeff1;
      } else {
         /// L1COEFFS
         step_max2 = coeff1 < 0 ? INFINITY : (constraint-thrs)/coeff1;
      }
      step = MIN(MIN(step,step_max2),step_max);
      if (step == INFINITY) break; // stop the path
      // Update coefficients
      cblas_axpy<T>(i+1,step,pr_u,1,pr_coeffs,1);
      // Update correlations
      cblas_axpy<T>(K,-step,pr_work+2*K,1,pr_DtR,1);
      // Update normX
      normX += coeff1*step*step-2*coeff2*step;
      // Update norm1
      thrs += step*coeff1;
      if (step == step_max) {
         /// Downdate, remove first_zero
         /// Downdate Ga, Gs, invGs, ind, coeffs
         for (int j = first_zero; j<i; ++j) {
            cblas_copy<T>(K,pr_Ga+(j+1)*K,1,pr_Ga+j*K,1);
            pr_ind[j]=pr_ind[j+1];
            pr_coeffs[j]=pr_coeffs[j+1];
         }
         pr_ind[i]=-1;
         pr_coeffs[i]=0;
         // Shift the rows/columns of Gs past the removed index.
         for (int j = first_zero; j<i; ++j) {
            cblas_copy<T>(first_zero,pr_Gs+(j+1)*LL,1,pr_Gs+j*LL,1);
            cblas_copy<T>(i-first_zero,pr_Gs+(j+1)*LL+first_zero+1,1,
                  pr_Gs+j*LL+first_zero,1);
         }
         // Rank-one downdate of invGs (inverse Schur complement).
         const T schur = pr_invGs[first_zero*LL+first_zero];
         cblas_copy<T>(first_zero,pr_invGs+first_zero*LL,1,pr_u,1);
         cblas_copy<T>(i-first_zero,pr_invGs+(first_zero+1)*LL+first_zero,LL,
               pr_u+first_zero,1);
         for (int j = first_zero; j<i; ++j) {
            cblas_copy<T>(first_zero,pr_invGs+(j+1)*LL,1,pr_invGs+j*LL,1);
            cblas_copy<T>(i-first_zero,pr_invGs+(j+1)*LL+first_zero+1,1,
                  pr_invGs+j*LL+first_zero,1);
         }
         cblas_syr<T>(CblasColMajor,CblasUpper,i,T(-1.0)/schur,
               pr_u,1,pr_invGs,LL);
         // Replay iteration i-1 with the shrunken active set.
         newAtom=false;
         i=i-2;
      } else {
         newAtom=true;
      }
      // Choose next action
      // Stopping tests: iteration budget, tiny/terminal step, vanished
      // residual, full active set, or the mode's constraint (nearly) met.
      if (iter > 4*L || abs(step) < 1e-10 ||
            step == step_max2 || (normX < 1e-10) ||
            (i == (L-1)) ||
            (mode == L2ERROR && normX - constraint < 1e-10) ||
            (mode == L1COEFFS && (constraint-thrs < 1e-10))) {
         break;
      }
   }
}
/* ************************
* Iterative thresholding
* ************************/
/// Implementation of IST for solving
/// \forall i, \min_{\alpha_i} ||\alpha_i||_1
/// s.t. ||\X_i-D\alpha_i||_2^2 <= constraint or
/// \forall i, \min_{\alpha_i} constraint*||\alpha_i||_1 + ...
/// ... ||\X_i-D\alpha_i||_2^2 <= lambda
template <typename T>
void ist(const Matrix<T>& X, const Matrix<T>& D,
SpMatrix<T>& spalpha, T lambda, constraint_type mode,
const int itermax,
const T tol,
const int numThreads) {
Matrix<T> alpha;
spalpha.toFull(alpha);
spalpha.clear();
ist(X,D,alpha,lambda,mode,itermax,tol,numThreads);
alpha.toSparse(spalpha);
}
/// IST solver, dense version. alpha is both the warm start and the output
/// (resized to K x M). Requires a normalized dictionary. Dispatches each
/// signal to coreIST (PENALTY mode) or coreISTconstrained (L2ERROR mode).
template <typename T>
void ist(const Matrix<T>& X, const Matrix<T>& D,
      Matrix<T>& alpha, T lambda, constraint_type mode,
      const int itermax,
      const T tol, const int numThreads) {
   if (mode == L1COEFFS) {
      std::cerr << "Mode not implemented" << std::endl;
      return;
   }
   int K=D.n();
   int M=X.n();
   alpha.resize(K,M);
   if (!D.isNormalized()) {
      cerr << "Current implementation of IST does not support non-normalized dictionaries" << endl;
      return;
   }
   /// compute the Gram Matrix G=D'D
   //CachedProdMatrix<T> G(D, K < 20000 && M*K/10 > K);
   //ProdMatrix<T> G(D, K < 20000 && M*K/10 > K);
   Matrix<T> G;
   D.XtX(G);
   // for (int i = 0; i<K; ++i) G[i*K+i] += 1e-6;
   G.addDiag(1e-12);   // numerical stabilization of the diagonal
   ProdMatrix<T> DtX(D,X,false);
   int NUM_THREADS=init_omp(numThreads);
   // Per-thread scratch: correlation vector and a sparse view of the
   // warm-start coefficients.
   Vector<T>* DtRT= new Vector<T>[NUM_THREADS];
   SpVector<T>* spAlphaT= new SpVector<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DtRT[i].resize(K);
      spAlphaT[i].resize(K);
   };
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> coeffs;
      alpha.refCol(i,coeffs);
      Vector<T>& DtR=DtRT[numT];
      SpVector<T>& spAlpha=spAlphaT[numT];
      T norm1 = coeffs.asum();
      // Compute DtR
      DtX.copyCol(i,DtR);
      Vector<T> Xi;
      X.refCol(i,Xi);
      T normX2 = Xi.nrm2sq();
      // Warm start: fold the current coefficients into the residual
      // correlations, DtR = D'x - G*alpha.
      if (norm1 > EPSILON) {
         coeffs.toSparse(spAlpha);
         G.mult(spAlpha,DtR,-1.0,1.0);
      }
      if (mode == PENALTY) {
         coreIST(G,DtR,coeffs,lambda,itermax,tol);
      } else {
         coreISTconstrained(G,DtR,coeffs,normX2,lambda,itermax,tol);
      }
   }
   delete[](DtRT);
   delete[](spAlphaT);
}
/*template <typename T>
inline void generalCD(const AbstractMatrix<T>& G, Vector<T>& DtRv, Vector<T>& coeffsv,
const T lambda, const int itermax, const T tol) {
Vector<T> diag;
G.diag(diag);
const int K = G.n();
T* const coeffs = coeffsv.rawX();
T* const DtR = DtRv.rawX();
for (int iter=0; iter < itermax; ++iter) {
if (iter % 5 == 0) {
T eps1=DtRv.fmaxval()/lambda-1;
if (eps1 <= tol) {
T eps2=1e10;
for (int jj=0; jj<K; ++jj) {
if (coeffs[jj] > 0) {
eps2=MIN(DtR[jj],eps2);
} else if (coeffs[jj] < 0) {
eps2=MIN(-DtR[jj],eps2);
}
}
eps2=-(eps2/lambda-1);
if (eps2 <= tol)
break;
}
}
for (int j = 0; j <K; ++j) {
T crit=DtR[j]+coeffs[j]*diag[j];
if (crit > lambda) {
T diff=coeffs[j];
coeffs[j]=(crit-lambda)/diag[j];
diff-=coeffs[j];
G.add_rawCol(j,DtR,diff);
} else if (crit < -lambda) {
T diff=coeffs[j];
coeffs[j]=(crit+lambda)/diag[j];
diff-=coeffs[j];
G.add_rawCol(j,DtR,diff);
} else if (coeffs[j]) {
G.add_rawCol(j,DtR,coeffs[j]);
coeffs[j]=T();
}
}
}
}*/
/// Coordinate-descent soft-thresholding for the l1-penalized problem with a
/// normalized dictionary (unit Gram diagonal assumed). DtRv holds the
/// residual correlations D'(x - D*alpha) on entry; coeffsv the warm start.
/// Inside the sweep DtR is kept OFFSET by the coefficients (DtR + coeffs,
/// see the vAdd before the loop), so DtR[j] is directly the unconstrained
/// coordinate minimizer; the true correlations are recovered with vSub at
/// convergence-check time.
template <typename T>
inline void coreIST(const AbstractMatrix<T>& G, Vector<T>& DtRv, Vector<T>& coeffsv,
      const T thrs, const int itermax,
      const T tol) {
   const int K = G.n();
   T* const coeffs = coeffsv.rawX();
   T* const DtR = DtRv.rawX();
   //  T* const prG = G.rawX();
   const T lambda_init=thrs;
   T maxDtR = DtRv.fmaxval();
   T norm1=coeffsv.asum();
   T lambda=lambda_init;
   // Switch to the offset representation DtR <- DtR + coeffs.
   vAdd(K,DtR,coeffs,DtR);
   for (int iter=0; iter < itermax; ++iter) {
      for (int j = 0; j <K; ++j) {
         // Soft-threshold coordinate j; `diff` is the coefficient change,
         // propagated to all correlations through column j of G.
         if (DtR[j] > lambda) {
            T diff=coeffs[j];
            coeffs[j]=DtR[j]-lambda;
            diff-=coeffs[j];
            DtR[j]-=diff;
            G.add_rawCol(j,DtR,diff);
            //cblas_axpy(K,diff,prG+j*K,1,DtR,1);
         } else if (DtR[j] < -lambda) {
            T diff=coeffs[j];
            coeffs[j]=DtR[j]+lambda;
            diff-=coeffs[j];
            DtR[j]-=diff;
            G.add_rawCol(j,DtR,diff);
            //cblas_axpy(K,diff,prG+j*K,1,DtR,1);
         } else if (coeffs[j]) {
            // Coefficient goes to zero.
            T diff=coeffs[j];
            coeffs[j]=T();
            DtR[j]-=diff;
            G.add_rawCol(j,DtR,diff);
            //cblas_axpy(K,diff,prG+j*K,1,DtR,1);
         }
      }
      // Periodic optimality check on the true correlations.
      if (iter % 5 == 1) {
         vSub(K,DtR,coeffs,DtR);   // undo the offset
         maxDtR = DtRv.fmaxval();
         norm1 =T();
         T DtRa = T();
         for (int j = 0; j<K; ++j) {
            if (coeffs[j]) {
               norm1 += abs(coeffs[j]);
               DtRa += DtR[j]*coeffs[j];
            }
         }
         vAdd(K,DtR,coeffs,DtR);   // restore the offset
         // kappa: duality-gap-style optimality measure.
         const T kappa = -DtRa+norm1*maxDtR;
         if (abs(lambda - maxDtR) < tol && kappa <= tol)
            break;
      }
   }
}
/// Weighted coordinate-descent soft-thresholding: coordinate j is shrunk by
/// lambda*weights[j]/G(j,j) (so a non-unit Gram diagonal is supported).
/// DtRv holds residual correlations on entry; coeffsv is the warm start and
/// the output.
template <typename T>
inline void coreISTW(const Matrix<T>& G, Vector<T>& DtRv, Vector<T>& coeffsv,const Vector<T>& weightsv,
      const T lambda, const int itermax,
      const T tol) {
   T opt=0;   // current optimality-violation measure
   const int K = G.n();
   T* const coeffs = coeffsv.rawX();
   T* const weights = weightsv.rawX();
   T* const DtR = DtRv.rawX();
   //  T* const prG = G.rawX();
   for (int iter=0; iter < itermax; ++iter) {
      for (int j = 0; j <K; ++j) {
         // Unconstrained coordinate minimizer u and its threshold.
         const T nrm = G(j,j);
         const T u = DtR[j]/nrm+coeffs[j];
         const T thrs = lambda*weights[j]/nrm;
         if (u > thrs) {
            T diff=coeffs[j];
            coeffs[j]=u-thrs;
            diff-=coeffs[j];
            G.add_rawCol(j,DtR,diff);
            //cblas_axpy(K,diff,prG+j*K,1,DtR,1);
         } else if (u < -thrs) {
            T diff=coeffs[j];
            coeffs[j]=u+thrs;
            diff-=coeffs[j];
            G.add_rawCol(j,DtR,diff);
            //cblas_axpy(K,diff,prG+j*K,1,DtR,1);
         } else if (coeffs[j]) {
            // Coefficient goes to zero.
            G.add_rawCol(j,DtR,coeffs[j]);
            coeffs[j]=0;
            //cblas_axpy(K,diff,prG+j*K,1,DtR,1);
         }
      }
      // Periodic KKT check: for active coefficients the scaled correlation
      // must equal +/-1; for inactive ones it must not exceed 1.
      if (iter % 10 == 0) {
         opt=0;
         for (int j = 0; j <K; ++j) {
            if (coeffs[j] > 0) {
               opt=MAX(opt,abs<T>(T(1.0)-DtR[j]/(weights[j]*lambda)));
            } else if (coeffs[j] < 0) {
               opt=MAX(opt,abs<T>(T(1.0)+DtR[j]/(lambda*weights[j])));
            } else {
               opt=MAX(opt,abs<T>(DtR[j]/(lambda*weights[j]))-T(1.0));
            }
         }
         if (opt < tol) break;
      }
   }
}
/*template <typename T>
inline void coreIST_unnormalized(const AbstractMatrix<T>& G, Vector<T>& DtRv, Vector<T>& coeffsv,
const T thrs, const int itermax,
const T tol) {
const int K = G.n();
T* const coeffs = coeffsv.rawX();
T* const DtR = DtRv.rawX();
// T* const prG = G.rawX();
const T lambda_init=thrs;
T maxDtR = DtRv.fmaxval();
T norm1=coeffsv.asum();
T lambda=lambda_init;
DtRv.add(coeffsv);
// vAdd(K,DtR,coeffs,DtR);
for (int iter=0; iter < itermax; ++iter) {
for (int j = 0; j <K; ++j) {
if (DtR[j] > lambda) {
T diff=coeffs[j];
coeffs[j]=DtR[j]-lambda;
diff-=coeffs[j];
DtR[j]-=diff;
G.add_rawCol(j,DtR,diff);
} else if (DtR[j] < -lambda) {
T diff=coeffs[j];
coeffs[j]=DtR[j]+lambda;
diff-=coeffs[j];
DtR[j]-=diff;
G.add_rawCol(j,DtR,diff);
} else if (coeffs[j]) {
T diff=coeffs[j];
coeffs[j]=T();
DtR[j]-=diff;
G.add_rawCol(j,DtR,diff);
}
}
if (iter % 5 == 1) {
vSub(K,DtR,coeffs,DtR);
maxDtR = DtRv.fmaxval();
norm1 =T();
T DtRa = T();
for (int j = 0; j<K; ++j) {
if (coeffs[j]) {
norm1 += abs(coeffs[j]);
DtRa += DtR[j]*coeffs[j];
}
}
DtRv.add(coeffs);
const T kappa = -DtRa+norm1*maxDtR;
if (abs(lambda - maxDtR) < tol && kappa <= tol)
break;
}
}
}*/
/// coreIST constrained
/// IST for the residual-constrained problem ||x - D*alpha||_2^2 <= eps.
/// Alternates soft-thresholding sweeps with updates of the multiplier
/// lambda: `err` tracks the residual energy incrementally, and lambda is
/// rescaled by eps/err whenever the coefficients look stationary, until
/// the residual matches eps within a progressively tightened tolerance.
template <typename T>
void coreISTconstrained(const AbstractMatrix<T>& G, Vector<T>& DtRv, Vector<T>&
      coeffsv, const T normX2, const T eps, const int itermax, const T tol) {
   const int K = G.n();
   T* const coeffs = coeffsv.rawX();
   T* const DtR = DtRv.rawX();
   //T* const prG = G.rawX();
   T err = normX2;            // current residual energy estimate
   T norm1 = coeffsv.asum();
   if (!norm1 && err <= eps) return;   // zero already feasible
   T current_tol = 10.0*tol;  // loose tolerance, tightened over time
   T maxDtR = DtRv.fmaxval();
   T lambda = maxDtR;
   T lambdasq= lambda*lambda;
   if (!norm1) {
      // Cold start: scale lambda so the predicted residual matches eps.
      lambdasq *= eps/err;
      lambda=sqrt(lambdasq);
   }
   // Indices of coefficients touched in the current sweep.
   Vector<int> indices(K);
   indices.set(-1);
   int* const pr_indices=indices.rawX();
   int count;
   for (int iter=0; iter < itermax; ++iter) {
      count=0;
      T old_err = err;
      for (int j = 0; j <K; ++j) {
         // Soft-thresholding
         T old_coeff = coeffs[j];
         T diff = DtR[j]+old_coeff;   // unconstrained coordinate minimizer
         if (diff > lambda) {
            coeffs[j] = diff - lambda;
            err+=lambdasq-DtR[j]*DtR[j];   // incremental residual update
            pr_indices[count++]=j;
         } else if (diff < - lambda) {
            coeffs[j] = diff + lambda;
            err+=lambdasq-DtR[j]*DtR[j];
            pr_indices[count++]=j;
         } else {
            coeffs[j]=T();
            if (old_coeff) {
               err+=diff*diff-DtR[j]*DtR[j];
            }
         }
         // Update DtR
         diff = old_coeff-coeffs[j];
         if (diff) {
            G.add_rawCol(j,DtR,diff);
            //cblas_axpy<T>(K,old_coeff-coeffs[j],prG+j*K,1,DtR,1);
         }
      }
      maxDtR = DtRv.fmaxval();
      norm1 =T();
      T DtRa = T();
      for (int j = 0; j<count; ++j) {
         const int ind = pr_indices[j];
         norm1 += abs(coeffs[ind]);
         DtRa += DtR[ind]*coeffs[ind];
      }
      // Coefficients stationary for the current lambda?
      if (norm1-DtRa/maxDtR <= current_tol) {
         // Did the residual just cross the eps boundary?
         const bool change = ((old_err > eps) && err < eps+current_tol) ||
            (old_err < eps && err > eps-current_tol);
         if (change) {
            if (current_tol == tol) {
               break;   // converged at the final tolerance
            } else {
               current_tol = MAX(current_tol*0.5,tol);
            }
         }
         // Rescale lambda towards the value matching the target residual.
         lambdasq *= eps/err;
         lambda=sqrt(lambdasq);
      }
   }
};
/// ist for group Lasso
/// Block-coordinate descent for simultaneous sparse coding: the Ngroups
/// signal matrices XT[i] share a dictionary and each group's coefficient
/// matrix alphaT[i] (used as warm start and output) is regularized by a
/// group (l1/l2) norm over rows. Requires a normalized dictionary; L1COEFFS
/// mode is not supported.
template <typename T>
void ist_groupLasso(const Matrix<T>* XT, const Matrix<T>& D,
      Matrix<T>* alphaT, const int Ngroups,
      const T lambda, const constraint_type mode,
      const int itermax,
      const T tol, const int numThreads) {
   int K=D.n();
   int n = D.m();
   if (!D.isNormalized()) {
      cerr << "Current implementation of block coordinate descent does not support non-normalized dictionaries" << endl;
      return;
   }
   if (mode == L1COEFFS) {
      std::cerr << "Mode not implemented" << std::endl;
      return;
   }
   /// compute the Gram Matrix G=D'D
   Matrix<T> G;
   D.XtX(G);
   int NUM_THREADS=init_omp(numThreads);
   // Per-thread scratch: residual correlations R'D and a transposed copy of
   // the group's coefficients (signals become rows of alphat).
   Matrix<T>* RtDT = new Matrix<T>[NUM_THREADS];
   Matrix<T>* alphatT = new Matrix<T>[NUM_THREADS];
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< Ngroups; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      const Matrix<T>& X = XT[i];
      int M = X.n();   // number of signals in this group
      Matrix<T>& alphat = alphatT[numT];
      alphaT[i].transpose(alphat);
      Matrix<T>& RtD = RtDT[numT];
      X.mult(D,RtD,true,false);   // X'D: initial correlations
      Vector<T> col, col2;
      T norm1 = alphat.asum();
      T normX2 = 0;
      // Cold start: initialize every signal's coefficients from the mean
      // signal of the group, solved as a single-vector problem.
      if (!norm1) {
         Vector<T> DtR_mean(K);
         Vector<T> coeffs_mean(K);
         coeffs_mean.setZeros();
         RtD.meanRow(DtR_mean);
         coeffs_mean.setZeros();
         if (mode == PENALTY) {
            coreIST(G,DtR_mean,coeffs_mean,lambda/T(2.0),itermax,tol);
         } else {
            Vector<T> meanVec(n);
            X.meanCol(meanVec);
            normX2=meanVec.nrm2sq();
            coreISTconstrained(G,DtR_mean,coeffs_mean,normX2,
                  lambda,itermax,tol);
            SpVector<T> spalpha(K);
            // Aggregate residual energy for the constrained group solve.
            normX2-=computeError(normX2,G,DtR_mean,coeffs_mean,spalpha);
            normX2=X.normFsq()-M*normX2;
         }
         alphat.fillRow(coeffs_mean);
      }
      if (M > 1) {
         // Fold the warm-start coefficients into the correlations:
         // RtD <- RtD - alphat * G (column by column, skipping zero rows).
         for (int j = 0; j<K; ++j) {
            alphat.refCol(j,col);
            const T nrm=col.nrm2sq();
            if (nrm) {
               G.refCol(j,col2);
               RtD.rank1Update(col,col2,T(-1.0));
            }
         }
         if (mode == PENALTY) {
            coreGroupIST(G,RtD,alphat,sqr<T>(M)*lambda/T(2.0),itermax,sqr<T>(M)*tol);
         } else {
            coreGroupISTConstrained(G,RtD,alphat,normX2,M*lambda,itermax,sqr<T>(M)*tol);
         }
      }
      alphat.transpose(alphaT[i]);
   }
   delete[](RtDT);
   delete[](alphatT);
};
/// Block-coordinate descent with group soft-thresholding for the penalized
/// simultaneous sparse coding problem. RtDm holds the residual correlations
/// R'D (one row per signal); coeffsm is the warm start and output, one
/// column of M coefficients per atom. The `activate` counters implement a
/// heuristic that temporarily skips atoms which repeatedly fail to enter
/// the active set.
template <typename T>
void coreGroupIST(const Matrix<T>& G, Matrix<T>& RtDm,
      Matrix<T>& coeffsm,
      const T thrs,
      const int itermax,
      const T tol) {
   const int K = G.n();     // number of atoms
   const int M = RtDm.m();  // number of signals in the group
   T* const prG = G.rawX();
   T* const RtD = RtDm.rawX();
   T* const coeffs = coeffsm.rawX();
   const T lambda_init=thrs;
   T lambda=lambda_init;
   Vector<T> old_coeffv(M);
   T* const old_coeff = old_coeffv.rawX();
   // Cached l2 norms of each atom's coefficient column.
   Vector<T> normsv(K);
   T* const norms = normsv.rawX();
   coeffsm.norm_2_cols(normsv);
   Vector<T> normRtDv(K);
   // Per-atom skip counters (negative = dormant, counts back up to 0).
   Vector<int> activatev(K);
   activatev.set(3);
   int* const activate=activatev.rawX();
   for (int iter=0; iter < itermax; ++iter) {
      for (int j = 0; j <K; ++j) {
         if (activate[j] >= 0) {
            if (norms[j]) {
               // Atom currently active: group soft-threshold the updated
               // coefficient block coeffs(:,j) + RtD(:,j).
               cblas_copy(M,coeffs+j*M,1,old_coeff,1);
               vAdd(M,coeffs+j*M,RtD+j*M,coeffs+j*M);
               const T nrm = cblas_nrm2(M,coeffs+j*M,1);
               if (nrm > lambda) {
                  norms[j]=nrm-lambda;
                  cblas_scal(M,norms[j]/nrm,coeffs+j*M,1);
                  // Propagate the coefficient change to all correlations.
                  vSub(M,old_coeff,coeffs+j*M,old_coeff);
                  cblas_ger(CblasColMajor,M,K,T(1.0),old_coeff,1,prG+j*K,1,RtD,M);
                  activate[j]=5;
               } else {
                  // Block shrinks to zero.
                  memset(coeffs+j*M,0,M*sizeof(T));
                  norms[j]=T();
                  cblas_ger(CblasColMajor,M,K,T(1.0),old_coeff,1,prG+j*K,1,RtD,M);
                  --activate[j];
               }
            } else {
               // Atom currently inactive: test whether it should enter.
               cblas_copy(M,RtD+j*M,1,old_coeff,1);
               const T nrm = cblas_nrm2(M,old_coeff,1);
               if (nrm > lambda) {
                  norms[j]=nrm-lambda;
                  cblas_copy(M,old_coeff,1,coeffs+j*M,1);
                  cblas_scal(M,norms[j]/nrm,coeffs+j*M,1);
                  cblas_ger(CblasColMajor,M,K,T(-1.0),coeffs+j*M,1,prG+j*K,1,RtD,M);
                  activate[j]=5;
               } else {
                  // Failed to enter: decrement, or go dormant for 10 sweeps.
                  activate[j] = (activate[j] == 0) ? -10 : activate[j]-1;
               }
            }
         } else {
            ++activate[j];   // dormant atom: count back towards reactivation
         }
      }
      // Periodic optimality check on the group-norm KKT conditions.
      if (iter % 5 == 4) {
         T norm1=normsv.asum();
         RtDm.norm_2sq_cols(normRtDv);
         T maxDtR = sqr(normRtDv.maxval());
         T DtRa=T();
         for (int j = 0; j<K; ++j) {
            if (norms[j]) {
               DtRa += cblas_dot(M,coeffs+j*M,1,RtD+j*M,1);
            }
         }
         if ((maxDtR - lambda) < (tol*maxDtR/norm1) && norm1-DtRa/maxDtR < tol) break;
      }
   }
};
/// Auxiliary function for ist_groupLasso (CONSTRAINED mode).
/// Same block-coordinate group soft-thresholding as coreGroupIST, but the
/// threshold `lambda` is adapted across sweeps so that the residual
/// energy `err` is driven toward the constraint level `eps`
/// (lambda^2 is rescaled by eps/err whenever the gap check passes).
///   G       : KxK Gram matrix of the dictionary (read-only).
///   RtDm    : MxK residual correlations R'D, updated in place.
///   coeffsm : MxK coefficient matrix, updated in place.
///   normR   : initial residual energy (err starts from it and is
///             updated incrementally after every coefficient change).
///   eps     : target residual-energy constraint.
///   itermax : maximum number of sweeps.
///   tol     : final tolerance; the working tolerance starts at 10*tol
///             and is halved toward tol as the iterate stabilizes.
template <typename T>
void coreGroupISTConstrained(const Matrix<T>& G, Matrix<T>& RtDm,
      Matrix<T>& coeffsm, const T normR,
      const T eps,
      const int itermax,
      const T tol) {
   const int K = G.n();
   const int M = RtDm.m();
   // Raw-pointer views for the low-level BLAS calls below
   T* const prG = G.rawX();
   T* const RtD = RtDm.rawX();
   T* const coeffs = coeffsm.rawX();
   T err = normR;
   Vector<T> old_coeffv(M);
   T* const old_coeff = old_coeffv.rawX();
   // norms[j] caches ||coeffs_j||_2 (post-threshold) for each group
   Vector<T> normsv(K);
   T* const norms = normsv.rawX();
   coeffsm.norm_2_cols(normsv);
   Vector<T> normRtDv(K);
   RtDm.norm_2sq_cols(normRtDv);
   // Active-set counters, same scheme as coreGroupIST (shorter benching)
   Vector<int> activatev(K);
   activatev.set(3);
   int* const activate=activatev.rawX();
   T norm1 = normsv.sum();
   // Already feasible with an all-zero solution: nothing to do
   if (!norm1 && err <= eps) return;
   T current_tol = 10.0*tol;
   T maxDtR = sqr(normRtDv.maxval());
   T lambda = maxDtR;
   T lambdasq= lambda*lambda;
   if (!norm1) {
      // Cold start: scale lambda from the constraint ratio
      lambdasq *= eps/err;
      lambda=sqrt(lambdasq);
   }
   for (int iter=0; iter < itermax; ++iter) {
      T old_err = err;
      for (int j = 0; j <K; ++j) {
         if (activate[j] >= 0) {
            if (norms[j]) {
               // Group currently active: gradient step on coeffs_j
               cblas_copy(M,coeffs+j*M,1,old_coeff,1);
               vAdd(M,coeffs+j*M,RtD+j*M,coeffs+j*M);
               const T nrm = cblas_nrm2(M,coeffs+j*M,1);
               if (nrm > lambda) {
                  norms[j]=nrm-lambda;
                  cblas_scal(M,norms[j]/nrm,coeffs+j*M,1);
                  // Incrementally track the residual energy change
                  vSub(M,old_coeff,coeffs+j*M,old_coeff);
                  err += cblas_dot(M,old_coeff,1,old_coeff,1)
                     +2*cblas_dot(M,old_coeff,1,RtD+j*M,1);
                  cblas_ger(CblasColMajor,M,K,T(1.0),old_coeff,1,prG+j*K,1,RtD,M);
                  activate[j]=3;
               } else {
                  // Whole group thresholded to zero
                  memset(coeffs+j*M,0,M*sizeof(T));
                  norms[j]=T();
                  err += cblas_dot(M,old_coeff,1,old_coeff,1)
                     +2*cblas_dot(M,old_coeff,1,RtD+j*M,1);
                  cblas_ger(CblasColMajor,M,K,T(1.0),old_coeff,1,prG+j*K,1,RtD,M);
                  --activate[j];
               }
            } else {
               // Group currently zero: test whether it should activate
               cblas_copy(M,RtD+j*M,1,old_coeff,1);
               const T nrm = cblas_nrm2(M,old_coeff,1);
               if (nrm > lambda) {
                  norms[j]=nrm-lambda;
                  cblas_copy(M,old_coeff,1,coeffs+j*M,1);
                  cblas_scal(M,norms[j]/nrm,coeffs+j*M,1);
                  err += cblas_dot(M,coeffs+j*M,1,coeffs+j*M,1)
                     -2*cblas_dot(M,coeffs+j*M,1,RtD+j*M,1);
                  cblas_ger(CblasColMajor,M,K,T(-1.0),coeffs+j*M,1,prG+j*K,1,RtD,M);
                  activate[j]=3;
               } else {
                  activate[j] = (activate[j] == 0) ? -3 : activate[j]-1;
               }
            }
         } else {
            // Benched group: count down toward re-activation
            ++activate[j];
         }
      }
      norm1 = normsv.sum();
      RtDm.norm_2sq_cols(normRtDv);
      maxDtR = sqr(normRtDv.maxval());
      T DtRa=T();
      for (int j = 0; j<K; ++j) {
         if (norms[j]) {
            DtRa += cblas_dot(M,coeffs+j*M,1,RtD+j*M,1);
         }
      }
      // When the iterate is near-stationary for the current lambda:
      // tighten the tolerance (or stop) and rescale lambda toward the
      // constraint err == eps
      if (norm1-DtRa/maxDtR <= current_tol) {
         const T tol_bis=current_tol*maxDtR;
         const bool change = ((old_err > eps) && err < eps+tol_bis) ||
            (old_err < eps && err > eps-tol_bis);
         if (change) {
            if (current_tol == tol) {
               break;
            } else {
               current_tol = MAX(current_tol*0.5,tol);
            }
         }
         lambdasq *= eps/err;
         lambda=sqrt(lambdasq);
      }
   }
};
/// auxiliary function for ist_groupLasso
template <typename T>
T computeError(const T normX2,const Vector<T>& norms,
const Matrix<T>& G,const Matrix<T>& RtD,const Matrix<T>& alphat) {
T err2 = normX2;
Vector<T> col,col2;
for (int j = 0; j<G.n(); ++j) {
if (norms[j] > EPSILON) {
alphat.refCol(j,col);
RtD.refCol(j,col2);
err2 -= 2*col.dot(col2);
T add = 0.0;
for (int k = 0; k<j; ++k) {
if (norms[k] > EPSILON) {
alphat.refCol(k,col2);
add -= G(j,k)*col.dot(col2);
}
}
add += add - G(j,j)*col.nrm2sq();
err2 += add;
}
}
return err2;
}
/// auxiliary function for ist
/// Residual energy of a dense coefficient vector, evaluated through its
/// sparse view: normX2 - alpha' G alpha - 2 DtR . alpha.
template <typename T>
T computeError(const T normX2,
      const Matrix<T>& G,const Vector<T>& DtR,const Vector<T>& coeffs,
      SpVector<T>& spAlpha) {
   // Work on the sparse representation of coeffs
   coeffs.toSparse(spAlpha);
   const T quadratic = G.quad(spAlpha);
   const T correlation = DtR.dot(spAlpha);
   return normX2 - quadratic - 2*correlation;
};
/* ******************
 * Simultaneous OMP
 * *****************/
/// Convenience overload: applies the same stopping threshold eps to all
/// Ngroups signal matrices (the generic overload below scales it by the
/// number of signals in each group when adapt==false).
///   X         : array of Ngroups data matrices.
///   D         : dictionary (must be normalized, see overload below).
///   spalpha   : output array of Ngroups sparse coefficient matrices.
///   L         : maximum number of atoms selected per group.
///   numThreads: number of OpenMP threads requested via init_omp.
template <typename T>
void somp(const Matrix<T>* X, const Matrix<T>& D, SpMatrix<T>* spalpha,
      const int Ngroups, const int L, const T eps,const int numThreads) {
   somp(X,D,spalpha,Ngroups,L,&eps,false,numThreads);
}
template <typename T>
void somp(const Matrix<T>* XT, const Matrix<T>& D, SpMatrix<T>* spalphaT,
const int Ngroups, const int LL, const T* eps, const bool adapt,
const int numThreads) {
if (LL <= 0) return;
const INTM K = D.n();
const INTM L = MIN(D.m(),MIN(LL,K));
if (!D.isNormalized()) {
cerr << "Current implementation of OMP does not support non-normalized dictionaries" << endl;
return;
}
/// compute the Gram Matrix G=D'D
Matrix<T> G;
D.XtX(G);
init_omp(numThreads);
int i;
#pragma omp parallel for private(i)
for (i = 0; i< Ngroups; ++i) {
const Matrix<T>& X = XT[i];
const INTM M = X.n();
SpMatrix<T>& spalpha = spalphaT[i];
spalpha.clear();
Vector<INTM> rv;
Matrix<T> vM;
T thrs = adapt ? eps[i] : M*(*eps);
coreSOMP(X,D,G,vM,rv,L,thrs);
spalpha.convert2(vM,rv,K);
}
}
/// Core of Simultaneous OMP for one group of signals.
/// Jointly decomposes the M columns of X on dictionary D (with Gram
/// matrix G = D'D), selecting at most L atoms; stops early when the
/// tracked residual energy E drops to eps or a near-dependent atom is
/// met. On output, r holds the selected atom indices (padded with -1)
/// and v the corresponding coefficient matrix.
template <typename T>
void coreSOMP(const Matrix<T>& X, const Matrix<T>& D, const Matrix<T>& G,
      Matrix<T>& v,
      Vector<INTM>& r, const int L, const T eps) {
   const int K = G.n();
   const int n = D.m();
   const int M = X.n();
   // Heuristic flop-count comparison deciding which correlation product
   // (XX'D vs X'D) is cheaper to precompute for this problem size
   const bool big_mode = M*K*(n+L) > 2*(M*n*n+K*n*(n+L));
   r.resize(L);
   r.set(-1);
   v.resize(0,X.n());
   if (M == 1) {
      // Single-signal group: fall back to plain ORMP
      Vector<T> scores(K);
      Vector<T> norm(K);
      Vector<T> tmp(K);
      Matrix<T> Un(L,L);
      Un.setZeros();
      Matrix<T> Undn(K,L);
      Matrix<T> Unds(L,L);
      Matrix<T> Gs(K,L);
      Vector<T> Rdn(K);
      Vector<T> Xt(X.rawX(),n);
      D.multTrans(Xt,Rdn);
      Vector<T> RUn(L);
      T normX = Xt.nrm2sq();
      T lambda=0;
      coreORMP(scores,norm,tmp,Un,Undn,Unds,Gs,Rdn,G,r,RUn,normX,&eps,&L,&lambda);
      // Copy out only the coefficients of the atoms actually selected
      int count=0;
      for (int i = 0; i<L; ++i) {
         if (r[i] == -1) break;
         ++count;
      }
      v.resize(count,X.n());
      Vector<T> v1(v.rawX(),count);
      Vector<T> v2(RUn.rawX(),count);
      v1.copy(v2);
      return;
   }
   Matrix<T> XXtD;
   Matrix<T> XtD;
   // E tracks the remaining residual energy across iterations
   T E;
   if (big_mode) {
      Matrix<T> XXt;
      X.XXt(XXt);
      E = XXt.trace();
      if (E < eps) return;
      XXt.mult(D,XXtD);
   } else {
      E=X.normFsq();
      if (E < eps) return;
      X.mult(D,XtD,true);
   }
   // Workspace matrices for the incremental Cholesky-style updates
   Matrix<T> A(K,L);
   A.setZeros();
   Matrix<T> B(L,K);
   B.setZeros();
   Matrix<T> S(L,L);
   S.setZeros();
   Matrix<T> Fs(K,L);
   Fs.setZeros();
   Matrix<T> Gs(K,L);
   Gs.setZeros();
   Matrix<T> As(L,L);
   As.setZeros();
   Vector<T> tmp(K);
   // e[i]: residual norm of atom i w.r.t. the selected subspace
   Vector<T> e(K);
   G.diag(e);
   Vector<T> f(K);
   if (big_mode) {
      for (int i = 0; i<K; ++i) {
         Vector<T> di;
         D.refCol(i,di);
         Vector<T> di2;
         XXtD.refCol(i,di2);
         f[i]=di.dot(di2);
      }
   } else {
      XtD.norm_2sq_cols(f);
   }
   Vector<T> c(L);
   c.setZeros();
   Vector<T> scores(K);
   /// permit unsafe fast low level accesses
   T* const prAs = As.rawX();
   T* const prA = A.rawX();
   T* const prS = S.rawX();
   T* const prGs = Gs.rawX();
   T* const prFs = Fs.rawX();
   T* const prB = B.rawX();
   T* const pr_c = c.rawX();
   T* const pr_tmp = tmp.rawX();
   int j;
   for (j = 0; j<L; ++j) {
      // Greedy selection: maximize f/e over the not-yet-selected atoms
      scores.copy(f);
      scores.div(e);
      for (int k = 0; k<j; ++k) scores[r[k]]=-1.0;
      const int currentInd = scores.max();
      const T invNorm=T(1.0)/sqrt(e[currentInd]);
      // Atom nearly dependent on the selected set: stop (discard it)
      if (invNorm > 1e3) {
         j=j-1;
         break;
      }
      r[j]=currentInd;
      E -= scores[currentInd];
      for (int k = 0; k<j; ++k) prS[j*L+k]=T();
      prS[j*L+j]=T(1.0);
      for (int k = 0; k<j; ++k) prAs[k*L+j]=prA[k*K+currentInd];
      /// Cholesky update with partial reorthogonalization
      // Reorthogonalize twice when the atom is poorly conditioned
      int iter = invNorm > 1.41 ? 2 : 1;
      for (int k = 0; k<iter; ++k) {
         for (int l = 0; l<j; ++l) {
            T scal = -cblas_dot<T>(j-l+1,prAs+l*L+l,1,prS+j*L+l,1);
            cblas_axpy<T>(l+1,scal,prS+l*L,1,prS+j*L,1);
         }
      }
      cblas_scal<T>(j+1,invNorm,prS+j*L,1);
      if (j == L-1 || E <= eps) {
         ++j;
         break;
      }
      /// Update e,f,scores,A,B,As,Bs,Fs,Gs,S,c
      /// Gs,S,A,As, e, Fs, B,c
      Vector<T> Gsj;
      Gs.refCol(j,Gsj);
      G.copyCol(currentInd,Gsj);
      cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),prGs,K,prS+j*L,1,
            T(0.0),prA+j*K,1);
      prAs[j*L+j]=prA[j*K+currentInd];
      Vector<T> Aj;
      A.refCol(j,Aj);
      tmp.sqr(Aj);
      e.sub(tmp);
      Vector<T> Fsj;
      Fs.refCol(j,Fsj);
      if (big_mode) {
         Vector<T> di;
         D.refCol(currentInd,di);
         XXtD.multTrans(di,Fsj);
      } else {
         Vector<T> di;
         XtD.refCol(currentInd,di);
         XtD.multTrans(di,Fsj);
      }
      cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),prFs,K,prS+j*L,1,
            T(0.0),prB+j,L);
      for (int k = 0; k<j;++k) pr_c[k]=T();
      for (int k = 0; k<=j;++k)
         cblas_axpy<T>(j,prS[j*L+k],prB+r[k]*L,1,pr_c,1);
      f.add(tmp,f[currentInd]*invNorm*invNorm);
      if (j > 0) {
         cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j,T(1.0),prA,K,pr_c,1,
               T(0.0),pr_tmp,1);
      } else {
         tmp.setZeros();
      }
      cblas_axpy<T>(K,T(-1.0),prB+j,L,pr_tmp,1);
      tmp.mult(tmp,Aj);
      f.add(tmp,T(2.0));
   }
   // Release the workspace before the final dense products
   A.clear();
   B.clear();
   Fs.clear();
   Gs.clear();
   As.clear();
   if (j == 0) return;
   // Recover the coefficients: v = (S S') Dg' X for the selected atoms
   Matrix<T> SSt;
   S.upperTriXXt(SSt,j);
   Matrix<T> Dg(n,j);
   for (int i = 0; i<j;++i) {
      Vector<T> Dgi;
      Dg.refCol(i,Dgi);
      D.copyCol(r[i],Dgi);
   }
   Matrix<T> SStDt;
   SSt.mult(Dg,SStDt,false,true);
   SStDt.mult(X,v);
};
#endif // DECOMP_H
|
gt.filter.c | /*
* PROJECT: GEM-Tools library
* FILE: gt.filter.c
* DATE: 02/08/2012
* AUTHOR(S): Santiago Marco-Sola <santiagomsola@gmail.com>
* DESCRIPTION: Application to filter {MAP,SAM,FASTQ} files and output the filtered result
*/
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#include "gem_tools.h"
// Sentinel marking a float parameter as "not set" (distance thresholds etc.)
#define GT_FILTER_FLOAT_NO_VALUE (-1.0)
// Error-reporting helpers: append the input file name and the current
// line number to the message. NOTE: they expect `parameters` and
// `__buffered_input` to be visible in the expansion context.
#define gt_filter_cond_fatal_error_msg(condition,error_msg,args...) \
  gt_cond_fatal_error_msg(condition,error_msg ". File '%s', line %"PRIu64"\n",##args, \
      parameters.name_input_file,__buffered_input->current_line_num-1)
#define gt_filter_fatal_error_msg(error_msg,args...) \
  gt_fatal_error_msg(error_msg ". File '%s', line %"PRIu64"\n",##args, \
      parameters.name_input_file,__buffered_input->current_line_num-1)
/* Inclusive [min,max] range of accepted quality scores
 * (see gt_filter_is_quality_value_allowed) */
typedef struct {
  uint64_t min;
  uint64_t max;
} gt_filter_quality_range;
/*
 * Full command-line configuration of gt.filter. A single global instance
 * (`parameters`) is consulted throughout the filtering pipeline.
 */
typedef struct {
  /* I/O */
  char* name_input_file;
  char* name_output_file;
  char* name_reference_file;
  char* name_gem_index_file;
  char* annotation;                 // Path to the GTF annotation file
  gt_gtf* gtf;                      // Parsed annotation (loaded on demand)
  bool mmap_input;
  bool paired_end;
  bool no_output;
  gt_file_format output_format;
  bool discarded_output;            // Also write filtered-out records
  bool check_duplicates;            // Forwarded to the insert_map/mmap calls
  char* name_discarded_output_file;
  gt_file_format discarded_output_format;
  /* Filter Read/Qualities */
  bool hard_trim;
  uint64_t left_trim;
  uint64_t right_trim;
  bool restore_trim;
  bool uniform_read;
  bool uniform_read_strict;
  bool qualities_to_offset_33;
  bool qualities_to_offset_64;
  bool remove_qualities;
  bool add_qualities;
  /* Filter Template/Alignments */
  bool mapped;
  bool unmapped;
  int64_t unique_level;
  float min_length;
  float max_length;
  int64_t min_maps;
  int64_t max_maps;
  float max_strata_after_map;       // Proportion of read length; <0 = disabled
  /* Make templates unique */
  int64_t reduce_to_unique_strata;  // <0 = disabled
  int64_t reduce_by_quality;        // <0 = disabled
  bool reduce_to_pairs;
  uint64_t reduce_to_unique;        // Map-count cap; UINT64_MAX = disabled
  bool reduce_by_gene_id;
  bool reduce_to_protein_coding;
  /* RNA Seq to recalculate counters */
  bool reduce_by_junctions;
  bool no_split_maps;
  bool only_split_maps;
  bool no_penalty_for_splitmaps;    // Use no-split distance when counting strata
  uint64_t min_intron_length;
  uint64_t min_block_length;
  /* Filter SE-Maps */
  bool first_map;                   // Keep only the first passing map
  bool keep_first_map;              // Fall back to map 0 when all are filtered
  bool keep_unique;
  bool matches_pruning;
  uint64_t max_decoded_matches;
  uint64_t min_decoded_strata;
  uint64_t max_output_matches;
  uint64_t max_input_matches;
  bool make_counters;
  bool only_unmapped;
  bool only_mapped;
  float min_event_distance;         // GT_FILTER_FLOAT_NO_VALUE = disabled
  float max_event_distance;
  float min_levenshtein_distance;
  float max_levenshtein_distance;
  gt_vector* map_ids;               /* (gt_string*) allowed sequence names */
  gt_shash* gtf_types;
  bool filter_by_strand_se;
  bool allow_strand_r;
  bool allow_strand_f;
  gt_vector* quality_score_ranges; /* (gt_filter_quality_range) */
  /* Filter PE-Maps */
  int64_t max_inss;
  int64_t min_inss;
  bool filter_by_strand_pe;
  bool allow_strand_rf;
  bool allow_strand_fr;
  bool allow_strand_ff;
  bool allow_strand_rr;
  /* Filter-Realign */
  bool mismatch_recovery;
  bool realign_hamming;
  bool realign_levenshtein;
  /* Checking/Report */
  bool check;
  bool check_format;
  gt_file_format check_file_format;
  /* Hidden */
  bool special_functionality;
  bool error_plot; // Print error distribution (depreciated)
  bool insert_size_plot; // Print insert size distribution (depreciated)
  bool show_sequence_list; // Display sequence list in the GEMindex/.fa...
  bool display_pretty; // Display pretty printed map(s)
  bool group_reads; // Group previously split reads
  bool sample_read; // Sample the read in chunks (annotated by chunk group)
  float split_chunk_size;
  float split_step_size;
  float split_left_trim;
  float split_right_trim;
  float split_min_remainder;
  /* Misc */
  uint64_t num_threads;
  bool verbose;
  /* Control flags */
  bool perform_dna_map_filter; // Any DNA-filtering criteria activated
  bool perform_rna_map_filter; // Any RNA-filtering criteria activated
  bool perform_annotation_filter; // Any annotation based filtering criteria activated
  bool load_index;
} gt_filter_args;
/*
 * Global defaults for all filtering options (designated initializer).
 * Fields not listed here (e.g. check_file_format) are zero-initialized
 * by the C rules for partially-initialized aggregates.
 */
gt_filter_args parameters = {
    /* I/O */
    .name_input_file=NULL,
    .name_output_file=NULL,
    .name_reference_file=NULL,
    .name_gem_index_file=NULL,
    .annotation = NULL,
    .gtf = NULL,
    .mmap_input=false,
    .paired_end=false,
    .no_output=false,
    .output_format=FILE_FORMAT_UNKNOWN,
    .discarded_output = false,
    .name_discarded_output_file=NULL,
    .discarded_output_format=FILE_FORMAT_UNKNOWN,
    .check_duplicates=false,
    /* Filter Read/Qualities */
    .hard_trim=false,
    .left_trim=0,
    .right_trim=0,
    .restore_trim=false,
    .uniform_read=false,
    .uniform_read_strict=false,
    .qualities_to_offset_33=false,
    .qualities_to_offset_64=false,
    .remove_qualities=false,
    .add_qualities=false,
    /* Filter Template/Alignments (negative values = disabled) */
    .mapped=false,
    .unmapped=false,
    .unique_level=-1,
    .min_length=-1.0,
    .max_length=-1.0,
    .min_maps=-1,
    .max_strata_after_map=-1.0,
    .max_maps=-1,
    /* Make templates unique */
    .reduce_to_unique_strata=-1,
    .reduce_by_gene_id=false,
    .reduce_by_junctions=false,
    .reduce_to_protein_coding=false,
    .reduce_to_unique=UINT64_MAX,
    .reduce_to_pairs=false,
    .reduce_by_quality=-1,
    /* RNA Seq */
    .no_split_maps=false,
    .only_split_maps=false,
    .no_penalty_for_splitmaps=false,
    .min_intron_length=0,
    .min_block_length=0,
    /* Filter SE-Maps */
    .first_map=false,
    .keep_first_map=false,
    .keep_unique=false,
    .matches_pruning=false,
    .max_decoded_matches=GT_ALL,
    .min_decoded_strata=0,
    .max_output_matches=GT_ALL,
    .max_input_matches=GT_ALL,
    .make_counters=false,
    .only_unmapped=false,
    .only_mapped=false,
    .min_event_distance=GT_FILTER_FLOAT_NO_VALUE,
    .max_event_distance=GT_FILTER_FLOAT_NO_VALUE,
    .min_levenshtein_distance=GT_FILTER_FLOAT_NO_VALUE,
    .max_levenshtein_distance=GT_FILTER_FLOAT_NO_VALUE,
    .map_ids=NULL,
    .gtf_types=NULL,
    .filter_by_strand_se=false,
    .allow_strand_r=false,
    .allow_strand_f=false,
    .quality_score_ranges = NULL,
    /* Filter PE-Maps (insert-size bounds wide open by default) */
    .max_inss=INT64_MAX,
    .min_inss=INT64_MIN,
    .filter_by_strand_pe=false,
    .allow_strand_rf=false,
    .allow_strand_fr=false,
    .allow_strand_ff=false,
    .allow_strand_rr=false,
    /* Filter-Realign */
    .mismatch_recovery=false,
    .realign_hamming=false,
    .realign_levenshtein=false,
    /* Checking/Report */
    .check = false,
    .check_format = false,
    /* Hidden */
    .special_functionality = false,
    .error_plot = false,
    .insert_size_plot = false,
    .show_sequence_list = false,
    .display_pretty = false,
    .group_reads = false,
    .sample_read = false,
    .split_chunk_size = -1.0,
    .split_step_size = -1.0,
    .split_left_trim = -1.0,
    .split_right_trim = -1.0,
    .split_min_remainder = 0.0,
    /* Misc */
    .num_threads=1,
    .verbose=false,
    /* Control flags */
    .perform_dna_map_filter=false,
    .perform_rna_map_filter=false,
    .perform_annotation_filter=false,
    .load_index=false
};
/*
* Helper to get num maps correctly also for unpaired
* mapped pairs
*/
GT_INLINE uint64_t gt_filter_get_num_maps(gt_template* template){
  GT_TEMPLATE_IF_SE_ALIGNMENT(template) {
    // Single-end: the mmap count is already the number of maps
    return gt_template_get_num_mmaps(template);
  } else {
    // Paired-end: a properly paired template counts its mmaps; an
    // unpaired one sums the single-end maps of both ends
    if (gt_template_is_mapped(template)) {
      return gt_template_get_num_mmaps(template);
    }
    GT_TEMPLATE_REDUCE_BOTH_ENDS(template,alignment_end1,alignment_end2);
    return gt_alignment_get_num_maps(alignment_end1) + gt_alignment_get_num_maps(alignment_end2);
  }
}
/*
* Checking/(Re)Aligning/MismsRecovery
*/
/*
 * Re-derives mismatch information for every map of every end against the
 * reference sequence archive, printing a diagnostic for any map that
 * cannot be recovered. Counters are recalculated afterwards (per end,
 * and template-wide for paired-end input).
 * name_input_file/current_line_num are used only for error reporting.
 */
GT_INLINE void gt_filter_mismatch_recovery_maps(
    char* const name_input_file,const uint64_t current_line_num,
    gt_template* const template,gt_sequence_archive* const sequence_archive) {
  // Unfolded as to report errors in the recovery
  gt_status error_code;
  uint64_t alignment_pos = 0;
  GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
    uint64_t map_pos = 0;
    GT_ALIGNMENT_ITERATE(alignment,map) {
      // Non-zero status => recovery failed for this map; report it
      if ((error_code=gt_map_recover_mismatches_sa(map,alignment->read,sequence_archive))) {
        gt_error_msg("Unrecoverable Alignment '%s':%"PRIu64"\n\tREAD::'"PRIgts"':%"PRIu64":%"PRIu64" ",
            name_input_file,current_line_num,PRIgts_content(template->tag),alignment_pos,map_pos);
        gt_output_map_fprint_map_pretty_sa(stdout,map,alignment->read,sequence_archive);
      }
      ++map_pos;
    }
    gt_alignment_recalculate_counters(alignment);
    ++alignment_pos;
  }
  // Paired-end templates also need their template-level counters refreshed
  if (gt_template_get_num_blocks(template)>1) gt_template_recalculate_counters(template);
}
/*
 * Validates every map of every end against the reference sequence
 * archive, printing a diagnostic for each incorrect map. The four
 * counter pointers accumulate checked/correct totals across calls.
 * Returns true iff every map of the template checked out.
 */
GT_INLINE bool gt_filter_check_maps(
    char* const name_input_file,const uint64_t current_line_num,
    gt_template* const template,gt_sequence_archive* const sequence_archive,
    uint64_t* const total_algs_checked,uint64_t* const total_algs_correct,
    uint64_t* const total_maps_checked,uint64_t* const total_maps_correct) {
  bool alignment_correct=true;
  gt_status error_code;
  uint64_t alignment_pos = 0;
  GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
    uint64_t map_pos = 0;
    GT_ALIGNMENT_ITERATE(alignment,map) {
      // Non-zero status => the alignment is inconsistent with the reference
      if ((error_code=gt_map_check_alignment_sa(map,alignment->read,sequence_archive))) {
        gt_error_msg("Wrong Alignment '%s':%"PRIu64"\n\tREAD::'"PRIgts"':%"PRIu64":%"PRIu64" ",
            name_input_file,current_line_num,PRIgts_content(template->tag),alignment_pos,map_pos);
        gt_output_map_fprint_map_pretty_sa(stdout,map,alignment->read,sequence_archive);
        alignment_correct = false;
      } else {
        ++(*total_maps_correct);
      }
      ++(*total_maps_checked);
      ++map_pos;
    }
    ++alignment_pos;
  }
  ++(*total_algs_checked);
  if (alignment_correct) {
    ++(*total_algs_correct);
    return true;
  } else {
    return false;
  }
}
/*
* Filtering MAPs functions
*/
void gt_filter_delete_map_ids(gt_vector* filter_map_ids) {
  // NULL-safe: release every id string, then the vector itself
  if (filter_map_ids==NULL) return;
  GT_VECTOR_ITERATE(filter_map_ids,map_id,pos,gt_string*) {
    gt_string_delete(*map_id);
  }
  gt_vector_delete(filter_map_ids);
}
/*
 * Returns true iff seq_name matches one of the user-provided map ids
 * (linear scan; caller must ensure parameters.map_ids is non-NULL).
 */
GT_INLINE bool gt_filter_is_sequence_name_allowed(gt_string* const seq_name) {
  GT_VECTOR_ITERATE(parameters.map_ids,map_id,pos,gt_string*) {
    if (gt_string_equals(seq_name,*map_id)) return true;
  }
  return false;
}
/*
 * Returns true iff quality_score lies inside at least one of the
 * user-provided inclusive [min,max] ranges (caller must ensure
 * parameters.quality_score_ranges is non-NULL).
 */
GT_INLINE bool gt_filter_is_quality_value_allowed(const uint64_t quality_score) {
  GT_VECTOR_ITERATE(parameters.quality_score_ranges,quality_range,pos,gt_filter_quality_range) {
    if (quality_score >= quality_range->min && quality_score <= quality_range->max) return true;
  }
  return false;
}
/*
 * Caps the number of matches kept in the template according to the
 * matches-pruning options: the decoded-matches/strata bounds are first
 * converted into an absolute count using the counters vector, then
 * further capped by max_output_matches; finally the mmaps are truncated.
 */
GT_INLINE void gt_filter_prune_matches(gt_template* const template) {
  uint64_t max_num_matches = GT_ALL;
  if (parameters.max_decoded_matches!=GT_ALL || parameters.min_decoded_strata!=0) {
    uint64_t max_strata;
    gt_counters_calculate_num_maps(gt_template_get_counters_vector(template),
        parameters.min_decoded_strata,parameters.max_decoded_matches,&max_strata,&max_num_matches);
  }
  if (parameters.max_output_matches!=GT_ALL) {
    max_num_matches = GT_MIN(max_num_matches,parameters.max_output_matches);
  }
  // Reduce matches
  if (max_num_matches < GT_ALL) {
    gt_template_reduce_mmaps(template,max_num_matches);
  }
}
/*
 * Returns true iff the map contains a splice junction whose genomic
 * boundaries are exactly [start,end]. Junction coordinates are derived
 * strand-aware from each pair of consecutive map blocks.
 */
GT_INLINE bool gt_filter_has_junction(gt_map* const map,const uint64_t start,const uint64_t end) {
  GT_MAP_ITERATE(map,map_block) {
    if (gt_map_has_next_block(map_block)) {
      const bool forward = (gt_map_get_strand(map_block) == FORWARD);
      // Is the junction in the overlap ?
      const uint64_t junctions_start = gt_map_get_end_mapping_position(forward ? map_block: gt_map_get_next_block(map_block)) + 1;
      const uint64_t junctions_end = gt_map_get_begin_mapping_position(forward ? gt_map_get_next_block(map_block): map_block) - 1;
      if (junctions_start == start && junctions_end == end) return true;
    }
  }
  return false;
}
GT_INLINE uint64_t gt_filter_count_junctions_in_region(gt_map* const map,const uint64_t start,const uint64_t end) {
  // Count the splice junctions of the map overlapping region [start,end]
  uint64_t num_junctions = 0;
  GT_MAP_ITERATE(map,map_block) {
    if (gt_map_has_next_block(map_block)) {
      const bool is_forward = (gt_map_get_strand(map_block) == FORWARD);
      // Strand-aware junction boundaries between this block and the next
      gt_map* const next_block = gt_map_get_next_block(map_block);
      const uint64_t junction_begin = gt_map_get_end_mapping_position(is_forward ? map_block : next_block) + 1;
      const uint64_t junction_end = gt_map_get_begin_mapping_position(is_forward ? next_block : map_block) - 1;
      // Standard interval-overlap test against [start,end]
      if (start <= junction_end && junction_begin <= end) ++num_junctions;
    }
  }
  return num_junctions;
}
/*
 * For a paired mmap whose two ends overlap on the genome, checks that the
 * split-map junctions inside the overlap agree between the two ends:
 * same number of junctions, and every end-1 junction inside the overlap
 * found at identical coordinates in end 2. Trivially true when either
 * end is missing or neither end is split.
 */
GT_INLINE bool gt_filter_are_overlapping_pairs_coherent(gt_map** const mmap) {
  if ((mmap[0] == NULL || mmap[1] == NULL) || (!gt_map_has_next_block(mmap[0]) && !gt_map_has_next_block(mmap[1]))) return true;
  // Check overlap
  uint64_t overlap_start, overlap_end;
  if (gt_map_block_overlap(mmap[0],mmap[1],&overlap_start,&overlap_end)) {
    // Junction counts inside the overlap must match between the ends
    uint64_t junctions_in_1 = gt_filter_count_junctions_in_region(mmap[0], overlap_start, overlap_end);
    uint64_t junctions_in_2 = gt_filter_count_junctions_in_region(mmap[1], overlap_start, overlap_end);
    if(junctions_in_1 != junctions_in_2) return false;
    GT_MAP_ITERATE(mmap[0],map_block) {
      if (gt_map_has_next_block(map_block)) {
        const bool forward = (gt_map_get_strand(map_block) == FORWARD);
        // Is the junction in the overlap ?
        const uint64_t junctions_start = gt_map_get_end_mapping_position(forward ? map_block: gt_map_get_next_block(map_block)) + 1;
        const uint64_t junctions_end = gt_map_get_begin_mapping_position(forward ? gt_map_get_next_block(map_block): map_block) - 1;
        // Find the junctions start in the other map
        if (junctions_start >= overlap_start && junctions_start < overlap_end &&
            !gt_filter_has_junction(mmap[1],junctions_start,junctions_end)) {
          return false; // Start not found, not overlapping split maps
        }
      }
    }
  }
  return true;
}
/*
 * Copies the map(s) carried by a GTF hit into the template.
 * Paired hits (hit->mmap set) are inserted as an mmap; single-end hits
 * go into the end selected by target_block: 1 => end 1, 2 => end 2,
 * 0 => the single-end reduction of the template.
 */
GT_INLINE void gt_filter_add_from_hit(gt_template* const template,gt_gtf_hit* hit, uint64_t target_block) {
  if (hit->mmap != NULL) {
    // add PE
    gt_map** mmap_copy = gt_mmap_array_copy(hit->mmap, hit->num_template_blocks);
    gt_template_insert_mmap(template,mmap_copy,hit->map_attributes, parameters.check_duplicates);
    free(mmap_copy); // insert copies the array; release the temporary
  } else if(hit->map != NULL) {
    if(target_block > 0){
      GT_TEMPLATE_REDUCE_BOTH_ENDS(template,alignment_1, alignment_2);
      if(target_block == 1){
        gt_alignment_insert_map(alignment_1,gt_map_copy(hit->map), parameters.check_duplicates);
      }else{
        gt_alignment_insert_map(alignment_2,gt_map_copy(hit->map), parameters.check_duplicates);
      }
    }else{
      GT_TEMPLATE_REDUCTION(template,alignment_dst);
      gt_alignment_insert_map(alignment_dst,gt_map_copy(hit->map), parameters.check_duplicates);
    }
  }
}
/*
 * Annotation-based reduction for a single end: searches GTF hits for the
 * alignment and copies into template_dst only those passing the active
 * criteria (pairing gene id, protein-coding, best junction ratio).
 * Returns true iff at least one hit was copied.
 * NOTE(review): unlike the paired-end variant below, `junction_hits`
 * alone does not open the gating condition here (gene_id || prot_coding
 * only) — confirm whether `|| junction_hits` was intentionally omitted.
 */
GT_INLINE bool gt_filter_make_reduce_by_annotation_alignment(gt_template* const template_dst,gt_alignment* const alignment, uint64_t block, gt_gtf_hits* hits) {
  bool filtered = false;
  gt_gtf_search_alignment_hits(parameters.gtf, hits, alignment);
  bool prot_coding = (parameters.reduce_to_protein_coding && hits->num_protein_coding >= 1);
  bool gene_id = (parameters.reduce_by_gene_id && hits->num_paired_genes >= 1);
  bool junction_hits = (parameters.reduce_by_junctions && hits->junction_hit_ration > 0.0);
  if(gene_id || prot_coding){
    GT_VECTOR_ITERATE(hits->exon_hits, e, c, gt_gtf_hit*){
      gt_gtf_hit* hit = *e;
      if(junction_hits){
        // Keep only hits matching the best junction ratio (-1 when the hit has no junctions)
        double junction_ratio = hit->num_junctions == 0 ? -1.0 : (double)hit->num_junctions_hits/(double)hit->num_junctions;
        if(junction_ratio > 0.0 && junction_ratio != hits->junction_hit_ration) continue;
      }
      if(gene_id && !hit->pairs_gene)continue;
      if(prot_coding && !hit->is_protein_coding)continue;
      filtered = true;
      gt_filter_add_from_hit(template_dst, hit, block);
    }
  }
  return filtered;
}
/*
 * Annotation-based reduction for a whole template.
 * SE templates delegate to the per-alignment variant. Unpaired PE
 * templates are processed end by end, taking care to preserve one end's
 * maps when only the other end gets filtered. Properly paired templates
 * are filtered against paired GTF hits directly.
 * Returns true iff any reduction took place.
 */
GT_INLINE bool gt_filter_make_reduce_by_annotation(gt_template* const template_dst,gt_template* const template_src) {
  bool filtered = false;
  GT_TEMPLATE_IF_SE_ALIGNMENT(template_src) {
    GT_TEMPLATE_REDUCTION(template_src,alignment_src);
    gt_gtf_hits* hits = gt_gtf_hits_new();
    filtered = gt_filter_make_reduce_by_annotation_alignment(template_dst, alignment_src, 0, hits);
    gt_gtf_hits_delete(hits);
    return filtered;
  } else {
    if (!gt_template_is_mapped(template_src)) {
      // Unpaired PE: filter the ends independently
      GT_TEMPLATE_REDUCE_BOTH_ENDS(template_src,alignment_end1,alignment_end2);
      gt_gtf_hits* hits = gt_gtf_hits_new();
      filtered = gt_filter_make_reduce_by_annotation_alignment(template_dst, alignment_end1, 1, hits);
      if(!filtered){
        // add all as we want to preserve them in case second alignment is filtered.
        GT_VECTOR_ITERATE(hits->exon_hits, e, c, gt_gtf_hit*){
          gt_filter_add_from_hit(template_dst, *e, 1);
        }
      }
      gt_gtf_hits_clear(hits); // reuse the hits container for end 2
      if(gt_filter_make_reduce_by_annotation_alignment(template_dst, alignment_end2, 2, hits)){
        filtered = true;
      }else if(filtered){
        // alignment 1 was filtered, so we have to copy all from alignment 2
        GT_VECTOR_ITERATE(hits->exon_hits, e, c, gt_gtf_hit*){
          gt_filter_add_from_hit(template_dst, *e, 2);
        }
      }
      gt_gtf_hits_delete(hits);
      return filtered;
    } else {
      // Properly paired: filter against paired GTF hits
      gt_gtf_hits* hits = gt_gtf_hits_new();
      gt_gtf_search_template_hits(parameters.gtf, hits, template_src);
      bool prot_coding = (parameters.reduce_to_protein_coding && hits->num_protein_coding >= 1);
      bool gene_id = (parameters.reduce_by_gene_id && hits->num_paired_genes >= 1);
      bool junction_hits = (parameters.reduce_by_junctions && hits->junction_hit_ration > 0.0);
      if(gene_id || prot_coding || junction_hits){
        GT_VECTOR_ITERATE(hits->exon_hits, e, c, gt_gtf_hit*){
          gt_gtf_hit* hit = *e;
          if(junction_hits){
            // Keep only hits matching the best junction ratio (-1 when the hit has no junctions)
            double junction_ratio = hit->num_junctions == 0 ? -1.0 : (double)hit->num_junctions_hits/(double)hit->num_junctions;
            if(junction_ratio > 0.0 && junction_ratio != hits->junction_hit_ration)continue;
          }
          if(gene_id && !hit->pairs_gene)continue;
          if(prot_coding && !hit->is_protein_coding)continue;
          filtered = true;
          gt_filter_add_from_hit(template_dst, hit, 0);
        }
      }
      gt_gtf_hits_delete(hits);
    }
  }
  return filtered;
}
/*
 * Reduction filter for a single-end alignment. The original per-map loop
 * tested two loop-invariant conditions on every iteration; they are
 * hoisted here, preserving the exact outcome:
 *   (1) reduce-to-unique-strata triggered  -> keep only the first map
 *   (2) more maps than reduce_to_unique    -> keep none
 *   (3) otherwise                          -> copy every map
 * file_format is unused but kept for signature parity with the other
 * per-alignment filters.
 */
void gt_alignment_reduction_filter(gt_alignment* const alignment_dst,gt_alignment* const alignment_src,const gt_file_format file_format) {
  const uint64_t num_maps = gt_alignment_get_num_maps(alignment_src);
  if (num_maps==0) return; // Nothing to reduce
  // (1) Reduction by unique level (can be calculated beforehand)
  if (parameters.reduce_to_unique_strata >= 0 &&
      (gt_alignment_get_uniq_degree(alignment_src) >= parameters.reduce_to_unique_strata)) {
    gt_alignment_insert_map(alignment_dst,gt_map_copy(gt_alignment_get_map(alignment_src,0)), parameters.check_duplicates);
    return;
  }
  // (2) Drop every map when the alignment maps too many times
  if (num_maps > parameters.reduce_to_unique) return;
  // (3) Keep all maps
  GT_ALIGNMENT_ITERATE(alignment_src,map) {
    gt_alignment_insert_map(alignment_dst,gt_map_copy(map), parameters.check_duplicates);
  }
}
/*
 * DNA map filter for a single-end alignment: walks the source maps and
 * copies into alignment_dst only those passing every active criterion.
 * Workflow: (1) pre-filtering, (2) per-map filters, (3) whole-set
 * reduction, (4) post-filtering (keep-first-map fallback).
 * BUGFIX: the keep-first-map fallback now guards against a NULL
 * first_map — previously, when keep_first_map was set but the source
 * alignment had no maps at all, a NULL pointer was inserted into the
 * destination alignment.
 */
void gt_alignment_dna_filter(gt_alignment* const alignment_dst,gt_alignment* const alignment_src,const gt_file_format file_format) {
  const uint64_t first_matching_distance = gt_counters_get_min_matching_strata(gt_alignment_get_counters_vector(alignment_src)) - 1;
  const uint64_t max_mismatch_quality = gt_alignment_get_max_mismatch_quality(alignment_src);
  // Reduction by unique level (can be calculated beforehand)
  bool pick_only_first_map = false;
  /*
   * (1) Pre-filtering steps
   */
  gt_map* first_map = NULL;
  if (parameters.keep_first_map && gt_alignment_get_num_maps(alignment_src)>0) {
    first_map = gt_map_copy(gt_alignment_get_map(alignment_src,0));
  }
  /*
   * (2) Filtering of maps
   */
  GT_ALIGNMENT_ITERATE(alignment_src,map) {
    // Check sequence name
    if (parameters.map_ids!=NULL) {
      if (!gt_filter_is_sequence_name_allowed(map->seq_name)) continue;
    }
    // Filter strata beyond first mapping (maps iterate in stratum order, so break)
    const int64_t current_stratum = parameters.no_penalty_for_splitmaps ? gt_map_get_no_split_distance(map) : gt_map_get_global_distance(map);
    if (parameters.max_strata_after_map >= 0.0 &&
        (current_stratum-first_matching_distance) > gt_alignment_get_read_proportion(alignment_src,parameters.max_strata_after_map)) break;
    // Check strata (event distance, expressed as a proportion of the read length)
    if (parameters.min_event_distance != GT_FILTER_FLOAT_NO_VALUE || parameters.max_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
      const uint64_t total_distance = parameters.no_penalty_for_splitmaps ? gt_map_get_no_split_distance(map) : gt_map_get_global_distance(map);
      if (parameters.min_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
        if (total_distance < gt_alignment_get_read_proportion(alignment_src,parameters.min_event_distance)) continue;
      }
      if (parameters.max_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
        if (total_distance > gt_alignment_get_read_proportion(alignment_src,parameters.max_event_distance)) continue;
      }
    }
    // Check levenshtein distance
    if (parameters.min_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE || parameters.max_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
      const uint64_t total_distance = gt_map_get_global_levenshtein_distance(map);
      if (parameters.min_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
        if (total_distance < gt_alignment_get_read_proportion(alignment_src,parameters.min_levenshtein_distance)) continue;
      }
      if (parameters.max_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
        if (total_distance > gt_alignment_get_read_proportion(alignment_src,parameters.max_levenshtein_distance)) continue;
      }
    }
    // Filter strand
    if (parameters.filter_by_strand_se) {
      if (map->strand==FORWARD && !parameters.allow_strand_f) continue;
      if (map->strand==REVERSE && !parameters.allow_strand_r) continue;
    }
    // Filter quality scores (SAM carries phred, MAP carries gt_score)
    if (parameters.quality_score_ranges!=NULL) {
      if (!gt_filter_is_quality_value_allowed((file_format==SAM) ? map->phred_score : map->gt_score)) continue;
    }
    /*
     * (3) Reduction of all maps
     */
    if (parameters.reduce_by_quality >= 0) {
      // Drop maps whose mismatch-quality sum is close to (but not) the best
      const int64_t q = gt_alignment_sum_mismatch_qualities(alignment_src,map);
      if (q!=0 && q!=max_mismatch_quality && abs(max_mismatch_quality-q)<=parameters.reduce_by_quality) continue;
    }
    /*
     * Insert the map
     */
    gt_alignment_insert_map(alignment_dst,gt_map_copy(map), parameters.check_duplicates);
    // Skip the rest if first map is enabled
    if (parameters.first_map || pick_only_first_map) break;
  }
  /*
   * (4) Post-filtering steps: fall back to the original first map when
   * everything got filtered out
   */
  if (parameters.keep_first_map) {
    if (gt_alignment_get_num_maps(alignment_dst)==0) {
      // first_map is NULL when the source had no maps: nothing to restore
      if (first_map!=NULL) gt_alignment_insert_map(alignment_dst,first_map, parameters.check_duplicates);
    } else if (first_map!=NULL) {
      gt_map_delete(first_map); // Fallback unused: release the copy
    }
  }
}
/*
 * Reduction filter for a template. SE templates delegate to the
 * per-alignment reduction; unpaired PE templates reduce each end
 * independently (unless reduce_to_pairs drops them entirely); properly
 * paired templates apply the same strata/count reduction on the mmaps.
 * NOTE(review): the map-count cap uses `>=` here but `>` in
 * gt_alignment_reduction_filter — confirm which boundary is intended.
 */
void gt_template_reduction_filter(gt_template* const template_dst,gt_template* const template_src,const gt_file_format file_format) {
  GT_TEMPLATE_IF_SE_ALIGNMENT(template_src) {
    GT_TEMPLATE_REDUCTION(template_src,alignment_src);
    GT_TEMPLATE_REDUCTION(template_dst,alignment_dst);
    gt_alignment_reduction_filter(alignment_dst,alignment_src,file_format);
  } else {
    if (!gt_template_is_mapped(template_src)) {
      // Unpaired ends are discarded outright when reduce_to_pairs is set
      if(!parameters.reduce_to_pairs){
        GT_TEMPLATE_REDUCE_BOTH_ENDS(template_src,alignment_src_end1,alignment_src_end2);
        GT_TEMPLATE_REDUCE_BOTH_ENDS(template_dst,alignment_dst_end1,alignment_dst_end2);
        gt_alignment_reduction_filter(alignment_dst_end1,alignment_src_end1,file_format);
        gt_alignment_reduction_filter(alignment_dst_end2,alignment_src_end2,file_format);
      }
    } else {
      GT_TEMPLATE_ITERATE_MMAP__ATTR(template_src,mmap,mmap_attributes) {
        // Unique-strata reduction: keep only the first mmap
        if (parameters.reduce_to_unique_strata >= 0 && (gt_template_get_uniq_degree(template_src) >= parameters.reduce_to_unique_strata)) {
          gt_map** mmap_copy = gt_mmap_array_copy(mmap,__mmap_num_blocks);
          gt_template_insert_mmap(template_dst,mmap_copy,mmap_attributes, parameters.check_duplicates);
          free(mmap_copy);
          break;
        }
        // Map-count cap: keep nothing when the template maps too often
        if(gt_template_get_num_mmaps(template_src) >= parameters.reduce_to_unique) break;
        gt_map** mmap_copy = gt_mmap_array_copy(mmap,__mmap_num_blocks);
        gt_template_insert_mmap(template_dst,mmap_copy,mmap_attributes, parameters.check_duplicates);
        free(mmap_copy);
      }
    }
  }
}
/*
 * DNA filtering of a template. Copies into @template_dst only those maps of
 * @template_src that pass all configured per-map filters (sequence name,
 * strata/event/levenshtein distance bounds, insert size, strandness,
 * quality-score ranges, quality reduction). @template_dst is assumed empty
 * on entry; @template_src is not modified.
 *
 * FIXES vs. previous version (post-filtering step):
 *   - keep_first_map could insert a NULL mmap array when the source template
 *     had no mmaps (first_mmap never assigned); now guarded.
 *   - when the rescued first mmap is not needed, only the array was freed and
 *     the deep-copied maps leaked; now they are deleted, mirroring
 *     gt_map_delete(first_map) in the alignment-level filter.
 */
void gt_template_dna_filter(gt_template* const template_dst,gt_template* const template_src,const gt_file_format file_format) {
  /*
   * Filtering workflow
   *   (1) Pre-filtering steps
   *   (2) Filtering of maps (taking them into account individually)
   *   (3) Reduction of all maps (taking them into account as a whole)
   *   (4) Post-filtering steps
   */
  GT_TEMPLATE_IF_SE_ALIGNMENT(template_src) {
    // Single-end: delegate to the alignment-level filter
    GT_TEMPLATE_REDUCTION(template_src,alignment_src);
    GT_TEMPLATE_REDUCTION(template_dst,alignment_dst);
    gt_alignment_dna_filter(alignment_dst,alignment_src,file_format);
  } else {
    if (!gt_template_is_mapped(template_src)) {
      // Unpaired PE template: filter each end independently
      GT_TEMPLATE_REDUCE_BOTH_ENDS(template_src,alignment_src_end1,alignment_src_end2);
      GT_TEMPLATE_REDUCE_BOTH_ENDS(template_dst,alignment_dst_end1,alignment_dst_end2);
      gt_alignment_dna_filter(alignment_dst_end1,alignment_src_end1,file_format);
      gt_alignment_dna_filter(alignment_dst_end2,alignment_src_end2,file_format);
    } else {
      const uint64_t first_matching_distance = gt_counters_get_min_matching_strata(gt_template_get_counters_vector(template_src))-1;
      const uint64_t max_mismatch_quality = gt_template_get_max_mismatch_quality(template_src);
      // Reduction by unique level (can be calculated beforehand)
      bool pick_only_first_map = false;
      /*
       * (1) Pre-filtering steps
       */
      // keep-first-map: rescue a deep copy of the first (best) mmap so it can be
      // restored if every map gets filtered out below
      gt_map** first_mmap = NULL;
      gt_mmap_attributes first_mmap_attributes = {0, 0, 0};
      if (parameters.keep_first_map && gt_template_get_num_mmaps(template_src)>0) {
        gt_mmap* const mmap = gt_template_get_mmap(template_src,0);
        first_mmap = gt_mmap_array_copy(mmap->mmap,gt_template_get_num_blocks(template_src));
        first_mmap_attributes = mmap->attributes;
      }
      /*
       * (2) Filtering of maps
       */
      GT_TEMPLATE_ITERATE_MMAP__ATTR(template_src,mmap,mmap_attributes) {
        const int64_t current_stratum = parameters.no_penalty_for_splitmaps ?
            gt_map_get_no_split_distance(mmap[0]) + gt_map_get_no_split_distance(mmap[1]):
            gt_map_get_global_distance(mmap[0]) + gt_map_get_global_distance(mmap[1]);
        // Stop once we are too many strata beyond the first matching stratum
        // NOTE(review): current_stratum-first_matching_distance mixes signed and
        // unsigned arithmetic — assumed current_stratum >= first_matching_distance
        if (parameters.max_strata_after_map >= 0.0 &&
            (current_stratum-first_matching_distance) > gt_template_get_read_proportion(template_src,parameters.max_strata_after_map)) break;
        // Check sequence name (both ends must map to an allowed sequence)
        if (parameters.map_ids!=NULL) {
          if (!gt_filter_is_sequence_name_allowed(mmap[0]->seq_name)) continue;
          if (!gt_filter_is_sequence_name_allowed(mmap[1]->seq_name)) continue;
        }
        // Check strata (total event distance of the pair, proportion of the read length)
        if (parameters.min_event_distance != GT_FILTER_FLOAT_NO_VALUE || parameters.max_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
          const int64_t total_distance = parameters.no_penalty_for_splitmaps ?
              gt_map_get_no_split_distance(mmap[0]) + gt_map_get_no_split_distance(mmap[1]):
              gt_map_get_global_distance(mmap[0]) + gt_map_get_global_distance(mmap[1]);
          if (parameters.min_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
            if (total_distance < gt_template_get_read_proportion(template_src,parameters.min_event_distance)) continue;
          }
          if (parameters.max_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
            if (total_distance > gt_template_get_read_proportion(template_src,parameters.max_event_distance)) continue;
          }
        }
        // Check levenshtein distance (total over both ends)
        if (parameters.min_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE || parameters.max_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
          const int64_t total_distance = gt_map_get_global_levenshtein_distance(mmap[0])+gt_map_get_global_levenshtein_distance(mmap[1]);
          if (parameters.min_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
            if (total_distance < gt_template_get_read_proportion(template_src,parameters.min_levenshtein_distance)) continue;
          }
          if (parameters.max_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
            if (total_distance > gt_template_get_read_proportion(template_src,parameters.max_levenshtein_distance)) continue;
          }
        }
        // Check insert size (only when bounds are actually set)
        if (parameters.min_inss > INT64_MIN || parameters.max_inss < INT64_MAX) {
          gt_status error_code;
          const int64_t inss = gt_template_get_insert_size(mmap,&error_code,0,0);
          if (parameters.min_inss > inss || inss > parameters.max_inss) continue;
        }
        // Check strandness (per-end and per-pair orientation constraints)
        if (parameters.filter_by_strand_se) {
          if (!parameters.allow_strand_f && (mmap[0]->strand==FORWARD || mmap[1]->strand==FORWARD)) continue;
          if (!parameters.allow_strand_r && (mmap[0]->strand==REVERSE || mmap[1]->strand==REVERSE)) continue;
        }
        if (parameters.filter_by_strand_pe) {
          if (mmap[0]->strand==FORWARD && mmap[1]->strand==REVERSE && !parameters.allow_strand_fr) continue;
          if (mmap[0]->strand==REVERSE && mmap[1]->strand==FORWARD && !parameters.allow_strand_rf) continue;
          if (mmap[0]->strand==FORWARD && mmap[1]->strand==FORWARD && !parameters.allow_strand_ff) continue;
          if (mmap[0]->strand==REVERSE && mmap[1]->strand==REVERSE && !parameters.allow_strand_rr) continue;
        }
        // Filter quality scores (SAM carries phred, MAP carries gt_score)
        if (parameters.quality_score_ranges!=NULL) {
          if (!gt_filter_is_quality_value_allowed((file_format==SAM) ? mmap_attributes->phred_score : mmap_attributes->gt_score)) continue;
        }
        /*
         * (3) Reduction of all maps
         */
        // Drop maps whose summed mismatch quality is too close to the template maximum
        // NOTE(review): max_mismatch_quality-q mixes unsigned/signed and abs() takes int;
        // kept as-is to match the alignment-level filter — confirm intended semantics
        if (parameters.reduce_by_quality >= 0) {
          const int64_t q = gt_alignment_sum_mismatch_qualities(gt_template_get_block(template_src,0), mmap[0]) +
              gt_alignment_sum_mismatch_qualities(gt_template_get_block(template_src,1), mmap[1]);
          if (q!=0 && q!=max_mismatch_quality && abs(max_mismatch_quality-q)<=parameters.reduce_by_quality) continue;
        }
        /*
         * Insert the map
         */
        gt_map** mmap_copy = gt_mmap_array_copy(mmap,__mmap_num_blocks);
        gt_template_insert_mmap(template_dst,mmap_copy,mmap_attributes, parameters.check_duplicates);
        free(mmap_copy); // insert keeps the maps; only the temporary array is freed
        // Skip the rest if first map is enabled
        if (parameters.first_map || pick_only_first_map) break;
      }
      /*
       * (4) Post-filtering steps
       */
      if (parameters.keep_first_map && first_mmap!=NULL) { // first_mmap==NULL when the source had no mmaps
        if (gt_template_get_num_mmaps(template_dst)==0) {
          // Nothing survived filtering: restore the rescued first mmap
          gt_template_insert_mmap(template_dst,first_mmap,&first_mmap_attributes, parameters.check_duplicates);
        } else {
          // Rescued copy not needed: delete the deep-copied maps (leak fix)
          const uint64_t num_blocks = gt_template_get_num_blocks(template_src);
          uint64_t block;
          for (block=0;block<num_blocks;++block) gt_map_delete(first_mmap[block]);
        }
        free(first_mmap); // the array itself is always ours to free
      }
    }
  }
}
/*
 * RNA filtering of a single-end alignment. Copies into @alignment_dst only
 * those maps of @alignment_src that satisfy the split-map related filters
 * (split/no-split, minimum intron length, minimum block length).
 * @alignment_dst is assumed empty on entry; @alignment_src is not modified.
 */
void gt_alignment_rna_filter(gt_alignment* const alignment_dst,gt_alignment* const alignment_src,const gt_file_format file_format) {
  GT_ALIGNMENT_ITERATE(alignment_src,map) {
    // Discard maps placed on disallowed sequences
    if (parameters.map_ids!=NULL && !gt_filter_is_sequence_name_allowed(map->seq_name)) continue;
    // Split-map presence constraints (a map with more than one block is a split map)
    const uint64_t total_blocks = gt_map_get_num_blocks(map);
    const bool is_split_map = (total_blocks > 1);
    if (parameters.no_split_maps && is_split_map) continue;
    if (parameters.only_split_maps && !is_split_map) continue;
    // Minimum intron length (only meaningful for split maps)
    if (parameters.min_intron_length > 0 && is_split_map &&
        gt_map_get_min_intron_length(map) < parameters.min_intron_length) continue;
    // Minimum block length (only meaningful for split maps)
    if (parameters.min_block_length > 0 && is_split_map &&
        gt_map_get_min_block_length(map) < parameters.min_block_length) continue;
    // Map passed all filters: keep a copy
    gt_alignment_insert_map(alignment_dst,gt_map_copy(map), parameters.check_duplicates);
    // First-map mode: stop right after the first surviving map
    if (parameters.first_map) return;
  }
}
/*
 * RNA filtering of a template. Copies into @template_dst only those mmaps of
 * @template_src that satisfy the split-map related filters (split/no-split,
 * minimum intron length, minimum block length). @template_dst is assumed
 * empty on entry; @template_src is not modified.
 *
 * FIX: the per-mmap scan trigger previously tested
 * 'min_intron_length != UINT64_MAX' and ignored min_block_length altogether,
 * so a min_block_length-only configuration could skip the scan and silently
 * disable the block-length filter. The trigger now uses '> 0' for both
 * options, consistent with the filter guards below and with the SE path.
 */
void gt_template_rna_filter(gt_template* const template_dst,gt_template* const template_src,const gt_file_format file_format) {
  GT_TEMPLATE_IF_SE_ALIGNMENT(template_src) {
    GT_TEMPLATE_REDUCTION(template_src,alignment_src);
    GT_TEMPLATE_REDUCTION(template_dst,alignment_dst);
    /*
     * SE
     */
    gt_alignment_rna_filter(alignment_dst,alignment_src,file_format);
  } else {
    /*
     * PE
     */
    if (!gt_template_is_mapped(template_src)) {
      // Unpaired PE template: filter each end independently
      GT_TEMPLATE_REDUCE_BOTH_ENDS(template_src,alignment_src_end1,alignment_src_end2);
      GT_TEMPLATE_REDUCE_BOTH_ENDS(template_dst,alignment_dst_end1,alignment_dst_end2);
      gt_alignment_rna_filter(alignment_dst_end1,alignment_src_end1,file_format);
      gt_alignment_rna_filter(alignment_dst_end2,alignment_src_end2,file_format);
    } else {
      const uint64_t num_blocks = gt_template_get_num_blocks(template_src);
      GT_TEMPLATE_ITERATE_MMAP__ATTR(template_src,mmap,mmap_attributes) {
        // Scan both ends for split maps and collect the minimum intron/block
        // lengths (only when some split-map-related filter is active)
        bool has_sm = false;
        uint64_t min_intron_length = UINT64_MAX, min_block_length = UINT64_MAX;
        if (parameters.no_split_maps || parameters.only_split_maps ||
            parameters.min_intron_length > 0 || parameters.min_block_length > 0) {
          GT_MMAP_ITERATE(mmap,map,end_p) {
            if (gt_map_get_num_blocks(map) > 1) {
              const uint64_t mil = gt_map_get_min_intron_length(map);
              const uint64_t mbl = gt_map_get_min_block_length(map);
              has_sm = true;
              if (mil != UINT64_MAX && mil < min_intron_length) min_intron_length = mil;
              if (mbl != UINT64_MAX && mbl < min_block_length) min_block_length = mbl;
            }
          }
        }
        if (parameters.no_split_maps && has_sm) continue;
        if (parameters.only_split_maps && !has_sm) continue;
        // Filter intron length (UINT64_MAX means no intron was observed)
        if (parameters.min_intron_length > 0 && min_intron_length != UINT64_MAX){
          if(min_intron_length < parameters.min_intron_length){
            continue;
          }
        }
        // Filter block length (UINT64_MAX means no split block was observed)
        if (parameters.min_block_length > 0 && min_block_length != UINT64_MAX){
          if(min_block_length < parameters.min_block_length) continue;
        }
        // Add the mmap (insert keeps the maps; only the temporary array is freed)
        gt_map** mmap_copy = gt_mmap_array_copy(mmap,num_blocks);
        gt_template_insert_mmap(template_dst,mmap_copy,mmap_attributes, parameters.check_duplicates);
        free(mmap_copy);
        // Skip the rest if best
        if (parameters.first_map) return;
      }
    }
  }
}
GT_INLINE bool gt_filter_apply_filters(
    const gt_file_format file_format,const uint64_t line_no,
    gt_sequence_archive* const sequence_archive,gt_template* const template) {
  /*
   * Applies every configured read/map filter to @template in place.
   * Returns true when the template should be kept and false when it is
   * filtered out. Only the template-level checks (mapped/unmapped,
   * uniqueness, read length, map count) return false; the map-level filters
   * (split-pair coherence, DNA/RNA/annotation/reduction) rewrite the
   * template's map set but always fall through.
   */
  /*
   * Recalculate counters without penalty for splitmaps
   */
  if (parameters.no_penalty_for_splitmaps) {
    gt_template_recalculate_counters_no_splits(template);
    gt_template_sort_by_distance__score_no_split(template);
  }
  /*
   * Process Read/Qualities // TODO: move out of filter (this is processing)
   */
  const uint64_t has_qualities = gt_template_has_qualities(template);
  if (parameters.remove_qualities && has_qualities) {
    GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
      gt_string_clear(alignment->qualities);
    }
  } else if (parameters.add_qualities && !has_qualities) {
    // Synthesize maximum-quality strings ('~' == highest printable phred symbol)
    GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
      const uint64_t read_length = gt_alignment_get_read_length(alignment);
      gt_string_resize(alignment->qualities,read_length+1);
      gt_string_set_length(alignment->qualities,read_length);
      GT_STRING_ITERATE(alignment->qualities,buffer,i) {
        buffer[i]='~';
      }
    }
  }
  // Normalize read content (strict mode presumably also touches qualities — see callee)
  if (parameters.uniform_read) {
    if (parameters.uniform_read_strict) {
      GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
        gt_dna_read_uniform_strict_content(alignment->read,alignment->qualities);
      }
    } else {
      GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
        gt_dna_read_uniform_content(alignment->read,alignment->qualities);
      }
    }
  }
  // Quality-offset conversion (33 <-> 64); both flags set would convert twice
  if (has_qualities) {
    if (parameters.qualities_to_offset_33) {
      GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
        gt_qualities_adapt_from_offset64_to_offset33(alignment->qualities);
      }
    }
    if (parameters.qualities_to_offset_64) {
      GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
        gt_qualities_adapt_from_offset33_to_offset64(alignment->qualities);
      }
    }
  }
  /*
   * Template/Alignment Filter
   */
  // Consider mapped/unmapped
  const bool is_mapped = gt_template_is_mapped(template);
  if (parameters.mapped && !is_mapped) return false;
  if (parameters.unmapped && is_mapped) return false;
  // Unique based filtering
  if (parameters.unique_level>=0.0 && is_mapped) {
    if (parameters.unique_level > gt_template_get_uniq_degree(template)) return false;
  }
  // Filter by read length (bounds are proportions of the read length)
  if (parameters.min_length>=0.0 || parameters.max_length>=0.0) {
    GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
      const uint64_t read_length = gt_alignment_get_read_length(alignment);
      if (parameters.min_length>=0.0) {
        const uint64_t min_length = gt_alignment_get_read_proportion(alignment,parameters.min_length);
        if (read_length < min_length) return false;
      }
      if (parameters.max_length>=0.0) {
        const uint64_t max_length = gt_alignment_get_read_proportion(alignment,parameters.max_length);
        if (read_length > max_length) return false;
      }
    }
  }
  // Filter by number of maps
  if (parameters.min_maps>=0 || parameters.max_maps>=0) {
    const uint64_t num_maps = gt_template_get_num_mmaps(template);
    if (parameters.min_maps>=0 && num_maps<parameters.min_maps) return false;
    if (parameters.max_maps>=0 && num_maps>parameters.max_maps) return false;
  }
  /*
   * MAP Filter
   */
  // Trim (hard trim and restore are mutually exclusive; both require recounting)
  if (parameters.hard_trim) {
    gt_template_hard_trim(template,parameters.left_trim,parameters.right_trim);
    gt_template_recalculate_counters(template);
  } else if (parameters.restore_trim) {
    gt_template_restore_trim(template);
    gt_template_recalculate_counters(template);
  }
  // (Re)Align
  if (parameters.realign_levenshtein) {
    gt_template_realign_levenshtein(template,sequence_archive);
  } else if (parameters.realign_hamming) {
    gt_template_realign_hamming(template,sequence_archive);
  } else if (parameters.mismatch_recovery) {
    gt_filter_mismatch_recovery_maps(parameters.name_input_file,line_no,template,sequence_archive);
  }
  // check the split-map pairs for all paired alignments and
  // remove mapping pairs where the split are not coherent
  if(gt_template_get_num_blocks(template) == 2 && gt_template_is_mapped(template)){
    // dup(false,false) presumably copies the template skeleton without maps — confirm
    gt_template *template_filtered = gt_template_dup(template,false,false);
    const uint64_t num_blocks = gt_template_get_num_blocks(template);
    GT_TEMPLATE_ITERATE_MMAP__ATTR_(template,mmap,mmap_attributes) {
      if (mmap[0] == NULL || mmap[1] == NULL || !gt_filter_are_overlapping_pairs_coherent(mmap))continue;
      gt_map** mmap_copy = gt_mmap_array_copy(mmap,num_blocks);
      gt_template_insert_mmap(template_filtered,mmap_copy,mmap_attributes, parameters.check_duplicates);
      free(mmap_copy);
    }
    // Keep the filtered map set; swapping lets the original be deleted cleanly
    gt_template_swap(template,template_filtered);
    gt_template_delete(template_filtered);
    if (parameters.no_penalty_for_splitmaps) {
      gt_template_recalculate_counters_no_splits(template);
      gt_template_sort_by_distance__score_no_split(template);
    }else{
      gt_template_recalculate_counters(template);
    }
  }
  // Map DNA-filtering (keep_unique: never filter a template down from its only map)
  uint64_t num_maps = gt_filter_get_num_maps(template);
  if (parameters.perform_dna_map_filter && (!parameters.keep_unique || num_maps > 1)) {
    gt_template *template_filtered = gt_template_dup(template,false,false);
    gt_template_dna_filter(template_filtered,template,file_format);
    // if keep_unique is on, we only flip if we have at least one
    // alignment left
    if(!parameters.keep_unique || gt_filter_get_num_maps(template_filtered) > 0){
      gt_template_swap(template,template_filtered);
    }
    gt_template_delete(template_filtered);
    if (parameters.no_penalty_for_splitmaps) {
      gt_template_recalculate_counters_no_splits(template);
      gt_template_sort_by_distance__score_no_split(template);
    }else{
      gt_template_recalculate_counters(template);
    }
  }
  // Map RNA-filtering (same flip-only-if-nonempty policy as the DNA filter)
  num_maps = gt_filter_get_num_maps(template);
  if (parameters.perform_rna_map_filter && (!parameters.keep_unique || num_maps > 1)) {
    gt_template *template_filtered = gt_template_dup(template,false,false);
    gt_template_rna_filter(template_filtered,template,file_format);
    // if keep_unique is on, we only flip if we have at least one
    // alignment left
    if(!parameters.keep_unique || gt_filter_get_num_maps(template_filtered) > 0){
      gt_template_swap(template,template_filtered);
    }
    // delete filtered and recalculate counters
    gt_template_delete(template_filtered);
    if (parameters.no_penalty_for_splitmaps) {
      gt_template_recalculate_counters_no_splits(template);
      gt_template_sort_by_distance__score_no_split(template);
    }else{
      gt_template_recalculate_counters(template);
    }
  }
  // Map Annotation-filtering (requires a loaded GTF; only for multimapping templates)
  num_maps = gt_filter_get_num_maps(template);
  if (parameters.gtf != NULL && parameters.perform_annotation_filter && num_maps > 1) {
    gt_template *template_filtered = gt_template_dup(template,false,false);
    bool filtered = gt_filter_make_reduce_by_annotation(template_filtered,template);
    if(filtered && (!parameters.keep_unique || gt_filter_get_num_maps(template_filtered) > 0)){
      gt_template_swap(template,template_filtered);
    }
    gt_template_delete(template_filtered);
    if (parameters.no_penalty_for_splitmaps) {
      gt_template_recalculate_counters_no_splits(template);
      gt_template_sort_by_distance__score_no_split(template);
    }else{
      gt_template_recalculate_counters(template);
    }
  }
  // reduce by level filter (note: swaps unconditionally, unlike the filters above)
  num_maps = gt_filter_get_num_maps(template);
  if ((parameters.reduce_to_unique_strata >= 0 || parameters.reduce_to_unique != UINT64_MAX|| parameters.reduce_to_pairs) && (num_maps > 1)) {
    gt_template *template_filtered = gt_template_dup(template,false,false);
    gt_template_reduction_filter(template_filtered,template,file_format);
    gt_template_swap(template,template_filtered);
    gt_template_delete(template_filtered);
    if (parameters.no_penalty_for_splitmaps) {
      gt_template_recalculate_counters_no_splits(template);
      gt_template_sort_by_distance__score_no_split(template);
    }else{
      gt_template_recalculate_counters(template);
    }
  }
  // Map pruning
  if (parameters.matches_pruning) gt_filter_prune_matches(template);
  // Make counters
  if (parameters.make_counters || parameters.no_penalty_for_splitmaps) {
    gt_template_recalculate_counters(template);
  }
  // Ok, go on
  return true;
}
GT_INLINE void gt_filter__print(
    const gt_file_format file_format,const uint64_t line_no,
    gt_sequence_archive* const sequence_archive,gt_template* const template,
    uint64_t* const total_algs_checked,uint64_t* const total_algs_correct,
    uint64_t* const total_maps_checked,uint64_t* const total_maps_correct,
    gt_buffered_output_file* const buffered_output,gt_generic_printer_attributes* const generic_printer_attributes,
    gt_buffered_output_file* const buffered_discarded_output,gt_generic_printer_attributes* const discarded_output_attributes) {
  /*
   * Apply all filters; a template that fails any of them is routed to the
   * discarded output (when configured) instead of the main output
   */
  bool is_discarded = !gt_filter_apply_filters(file_format,line_no,sequence_archive,template);
  // Uniform-read mode can produce zero-length reads; drop those entirely (no output at all)
  if (parameters.uniform_read) {
    GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
      if (gt_alignment_get_read_length(alignment)==0) return;
    }
  }
  /*
   * Check maps (only for templates that survived filtering)
   */
  if (!is_discarded && parameters.check) {
    if (!gt_filter_check_maps(parameters.name_input_file,line_no,
        template,sequence_archive,total_algs_checked,total_algs_correct,total_maps_checked,total_maps_correct)) is_discarded = true;
  }
  /*
   * Route the template to the proper output
   */
  if (!parameters.no_output && !is_discarded) {
    if (gt_output_generic_bofprint_template(buffered_output,template,generic_printer_attributes)) {
      gt_error_msg("Fatal error outputting read '"PRIgts"'(InputLine:%"PRIu64")\n",
          PRIgts_content(gt_template_get_string_tag(template)),line_no);
    }
  } else if (is_discarded && buffered_discarded_output!=NULL) {
    if (gt_output_generic_bofprint_template(buffered_discarded_output,template,discarded_output_attributes)) {
      gt_error_msg("Fatal error outputting read '"PRIgts"'(InputLine:%"PRIu64")\n",
          PRIgts_content(gt_template_get_string_tag(template)),line_no);
    }
  }
}
/*
* Special funcionality
*/
/*
 * Prints one FASTQ record for a (possibly trimmed) read chunk.
 * The tag line optionally carries segmented-read info plus lt:/rt: fields
 * holding the trimmed-away prefix/suffix (bases and qualities) so the full
 * read can be reconstructed later. @chunk_size is the length of the kept
 * middle part (used to locate the right-trim range within @read).
 * Output statement order defines the record layout — do not reorder.
 */
GT_INLINE void gt_filter_sample_read_print_fastq(
    gt_buffered_output_file* const buffered_output,gt_string* const tag,gt_string* const read,gt_string* const qualities,
    const bool print_segmented_read_info,const uint64_t segment_id,const uint64_t total_segments,
    const uint64_t left_trim,const uint64_t right_trim,const uint64_t chunk_size) {
  gt_bofprintf(buffered_output,"@"PRIgts,PRIgts_content(tag));
  if (print_segmented_read_info) gt_output_bofprint_segmented_read_info(buffered_output,segment_id,total_segments); // Segmented Read
  if (left_trim > 0) {
    gt_bofprintf(buffered_output," lt:Z:%"PRIu64":"PRIgts":"PRIgts,left_trim,
        PRIgts_range_content(read,0,left_trim),
        PRIgts_range_content(qualities,0,left_trim)); // Left-trim
  }
  if (right_trim > 0) {
    gt_bofprintf(buffered_output," rt:Z:%"PRIu64":"PRIgts":"PRIgts,right_trim,
        PRIgts_range_content(read,left_trim+chunk_size,right_trim),
        PRIgts_range_content(qualities,left_trim+chunk_size,right_trim)); // Right-trim
  }
  // Print READ + QUALITIES (trimmed)
  gt_bofprintf(buffered_output,"\n"PRIgts"\n+\n"PRIgts"\n",
      PRIgts_trimmed_content(read,left_trim,right_trim),
      PRIgts_trimmed_content(qualities,left_trim,right_trim));
}
/*
 * FASTA counterpart of gt_filter_sample_read_print_fastq: same tag-line
 * layout (segmented-read info, lt:/rt: trim fields) but without qualities.
 * Output statement order defines the record layout — do not reorder.
 */
GT_INLINE void gt_filter_sample_read_print_fasta(
    gt_buffered_output_file* const buffered_output,gt_string* const tag,gt_string* const read,
    const bool print_segmented_read_info,const uint64_t segment_id,const uint64_t total_segments,
    const uint64_t left_trim,const uint64_t right_trim,const uint64_t chunk_size) {
  gt_bofprintf(buffered_output,">"PRIgts,PRIgts_content(tag));
  if (print_segmented_read_info) gt_output_bofprint_segmented_read_info(buffered_output,segment_id,total_segments); // Segmented Read
  if (left_trim > 0) {
    gt_bofprintf(buffered_output," lt:Z:%"PRIu64":"PRIgts,left_trim,
        PRIgts_range_content(read,0,left_trim)); // Left-trim
  }
  if (right_trim > 0) {
    gt_bofprintf(buffered_output," rt:Z:%"PRIu64":"PRIgts,right_trim,
        PRIgts_range_content(read,left_trim+chunk_size,right_trim)); // Right-trim
  }
  // Print READ (trimmed)
  gt_bofprintf(buffered_output,"\n"PRIgts"\n",
      PRIgts_trimmed_content(read,left_trim,right_trim));
}
/*
 * Re-assembles reads that were previously split into segments (see
 * gt_filter_sample_read): consecutive input records carrying SegmentedRead
 * info with the same tag are merged back into one template and printed once
 * the group is complete. Input must be sorted so that segment 1..N of a read
 * arrive consecutively; any violation is a fatal error.
 * NOTE: single-threaded on purpose — grouping state spans loop iterations.
 */
GT_INLINE void gt_filter_group_reads() {
  // Open file IN/OUT
  gt_input_file* input_file = (parameters.name_input_file==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input);
  gt_output_file* output_file = (parameters.name_output_file==NULL) ?
      gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  // Prepare out-printers
  if (parameters.output_format==FILE_FORMAT_UNKNOWN) parameters.output_format = input_file->file_format; // Select output format
  gt_generic_printer_attributes* const generic_printer_attributes = gt_generic_printer_attributes_new(parameters.output_format);
  // SegmentedRead aux variables (group is "closed" whenever last_segment_id==total_segments)
  gt_template* const group_template = gt_template_new();
  uint64_t total_segments = 0, last_segment_id = 0;
  GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) {
    // Get group attribute
    gt_segmented_read_info* const segmented_read_info = gt_attributes_get_segmented_read_info(template->attributes);
    if (segmented_read_info==NULL) {
      // Plain (unsegmented) read: it must not interrupt an open group
      gt_filter_cond_fatal_error_msg(total_segments!=last_segment_id,
          "Expected SegmentedRead Info => lastRead(%"PRIu64"/%"PRIu64")",last_segment_id,total_segments);
      gt_template_restore_trim(template); // If any
      // NOTE(review): this iterates group_template, not template — looks like it
      // should clean the current template's attributes; verify intent
      GT_TEMPLATE_ITERATE_ALIGNMENT(group_template,alignment) {
        gt_attributes_remove(alignment->attributes,GT_ATTR_ID_SEGMENTED_READ_INFO); // If any
      }
      gt_output_generic_bofprint_template(buffered_output,template,generic_printer_attributes); // Print it, as it is
    } else {
      // First, undo the trim
      gt_template_restore_trim(template);
      // Tackle the group merging
      if (last_segment_id==total_segments) {
        /*
         * New group: must start at segment 1 of a non-empty group
         */
        gt_filter_cond_fatal_error_msg(segmented_read_info->total_segments==0 || segmented_read_info->segment_id!=1,
            "Wrong SegmentedRead Info (Zero reads in group or not properly sorted)");
        gt_template_clear(group_template,true);
        gt_template_copy(group_template,template,true,true);
        total_segments = segmented_read_info->total_segments;
        last_segment_id = segmented_read_info->segment_id;
      } else if (segmented_read_info->segment_id==last_segment_id+1 && segmented_read_info->segment_id <= total_segments) {
        /*
         * Old group (Keep merging): segment ids must be consecutive and the tag must match
         */
        gt_filter_cond_fatal_error_msg(!gt_string_equals(template->tag,group_template->tag),
            "Wrong TAG in Segmented Reads Sequence ('"PRIgts"'/'"PRIgts"')",PRIgts_content(group_template->tag),PRIgts_content(template->tag));
        gt_template_merge_template_mmaps(group_template,template);
        last_segment_id = segmented_read_info->segment_id;
        if (last_segment_id==total_segments) { // Close group
          GT_TEMPLATE_ITERATE_ALIGNMENT(group_template,alignment) {
            gt_attributes_remove(alignment->attributes,GT_ATTR_ID_SEGMENTED_READ_INFO); // If any
          }
          gt_output_generic_bofprint_template(buffered_output,group_template,generic_printer_attributes);
        }
      } else {
        // Out-of-order or out-of-range segment id
        gt_filter_fatal_error_msg("Wrong SegmentedRead Info => Expected(%"PRIu64"/%"PRIu64")::Found(%"PRIu64"/%"PRIu64").",
            segmented_read_info->segment_id,segmented_read_info->total_segments,last_segment_id,total_segments);
      }
    }
  } GT_END_READING_WRITING_LOOP(input_file,output_file,template);
  // Check proper end of merging groups (input must not end mid-group)
  gt_filter_cond_fatal_error_msg(total_segments!=last_segment_id,
      "Expected SegmentedRead Info => lastRead(%"PRIu64"/%"PRIu64")",last_segment_id,total_segments);
  // Clean
  gt_template_delete(group_template);
  gt_generic_printer_attributes_delete(generic_printer_attributes);
  gt_input_file_close(input_file);
  gt_output_file_close(output_file);
}
/*
 * Splits every read into overlapping chunks (sliding window of
 * split_chunk_size advanced by split_step_size, after removing
 * split_left_trim/split_right_trim) and prints each chunk as a FASTQ/FASTA
 * record tagged with SegmentedRead info, so gt_filter_group_reads can later
 * reassemble them. All split_* parameters may be proportions of the read
 * length (resolved via gt_get_integer_proportion). Reads too short to split
 * are emitted unmodified as a single record.
 */
GT_INLINE void gt_filter_sample_read() {
  // Open file IN/OUT
  gt_input_file* input_file = (parameters.name_input_file==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input);
  gt_output_file* output_file = (parameters.name_output_file==NULL) ?
      gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  // Parallel I/O
#ifdef HAVE_OPENMP
#pragma omp parallel num_threads(parameters.num_threads)
#endif
  {
    GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) {
      GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
        // Calculate the chunks (proportional parameters resolved per read)
        const uint64_t read_length = gt_alignment_get_read_length(alignment);
        const uint64_t split_chunk_size = gt_get_integer_proportion(parameters.split_chunk_size,read_length);
        const uint64_t split_min_remainder = gt_get_integer_proportion(parameters.split_min_remainder,read_length);
        // Check boundaries: chunk covers the read (or is degenerate) => emit read as-is, untrimmed
        if (split_chunk_size >= read_length || split_chunk_size <= split_min_remainder) {
          if (gt_alignment_has_qualities(alignment)) {
            gt_filter_sample_read_print_fastq(buffered_output,alignment->tag,alignment->read,alignment->qualities,false,1,1,0,0,read_length); // FASTQ
          } else {
            gt_filter_sample_read_print_fasta(buffered_output,alignment->tag,alignment->read,false,1,1,0,0,read_length); // FASTA
          }
          continue;
        }
        uint64_t split_step_size = gt_get_integer_proportion(parameters.split_step_size,read_length);
        if (split_step_size==0) split_step_size=1; // guarantee forward progress
        const uint64_t split_left_trim = gt_get_integer_proportion(parameters.split_left_trim,read_length);
        const uint64_t split_right_trim = gt_get_integer_proportion(parameters.split_right_trim,read_length);
        // Number of full-size windows that fit in the usable (trimmed) region
        const uint64_t full_chunks = ((read_length-split_left_trim-split_right_trim-split_chunk_size)/split_step_size)+1;
        uint64_t total_chunks = full_chunks;
        // left_trim/right_trim bracket the current window; both move by split_step_size per chunk
        uint64_t left_trim=split_left_trim, right_trim=read_length-split_left_trim-split_chunk_size;
        // Check last chunk (remainder): a final shorter window, kept only if long enough
        const uint64_t last_left_trim = left_trim+(split_step_size*full_chunks);
        const uint64_t remainder_chunk = read_length-split_right_trim-last_left_trim;
        bool print_remainder_chunk = false;
        if (remainder_chunk > 0 && split_min_remainder > 0 &&
            remainder_chunk < split_chunk_size && remainder_chunk >= split_min_remainder) {
          print_remainder_chunk = true; ++total_chunks;
        }
        uint64_t i;
        for (i=0;i<full_chunks;++i,left_trim+=split_step_size,right_trim-=split_step_size) {
          if (gt_alignment_has_qualities(alignment)) {
            gt_filter_sample_read_print_fastq(
                buffered_output,alignment->tag,alignment->read,alignment->qualities,true,
                i+1,total_chunks,left_trim,right_trim,split_chunk_size); // FASTQ
          } else {
            gt_filter_sample_read_print_fasta(
                buffered_output,alignment->tag,alignment->read,true,
                i+1,total_chunks,left_trim,right_trim,split_chunk_size); // FASTA
          }
        }
        // Print last chunk (remainder)
        if (print_remainder_chunk) {
          if (gt_alignment_has_qualities(alignment)) {
            gt_filter_sample_read_print_fastq(
                buffered_output,alignment->tag,alignment->read,alignment->qualities,true,
                total_chunks,total_chunks,last_left_trim,split_right_trim,remainder_chunk); // FASTQ
          } else {
            gt_filter_sample_read_print_fasta(
                buffered_output,alignment->tag,alignment->read,true,
                total_chunks,total_chunks,last_left_trim,split_right_trim,remainder_chunk); // FASTA
          }
        }
      }
    } GT_END_READING_WRITING_LOOP(input_file,output_file,template);
  }
  // Clean
  gt_input_file_close(input_file);
  gt_output_file_close(output_file);
}
/*
 * Prints one insert size per line for every mmap of every paired-end
 * template in the input (only the first mmap when --first-map is set).
 * Templates that are not paired-end (num_blocks != 2) are skipped.
 *
 * FIX: insert size is a signed quantity (it is stored in an int64_t and
 * compared against INT64_MIN/INT64_MAX by the inss filter), but it was
 * printed with PRIu64 — a negative insert size showed up as a huge unsigned
 * number. Print with PRIi64 instead.
 */
GT_INLINE void gt_filter_print_insert_size_distribution() {
  // Open file IN/OUT (stdin/stdout when no file names are given)
  gt_input_file* input_file = (parameters.name_input_file==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input);
  gt_output_file* output_file = (parameters.name_output_file==NULL) ?
      gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  // Parallel I/O
#ifdef HAVE_OPENMP
#pragma omp parallel num_threads(parameters.num_threads)
#endif
  {
    GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) {
      // Print insert size (paired-end templates only)
      if (gt_template_get_num_blocks(template)!=2) continue;
      GT_TEMPLATE_ITERATE_(template,mmap) {
        gt_status error_code; // NOTE(review): error_code is ignored, as before — confirm acceptable
        const int64_t insert_size = gt_template_get_insert_size(mmap,&error_code,0,0);
        gt_bofprintf(buffered_output,"%"PRIi64"\n",insert_size);
        if (parameters.first_map) break;
      }
    } GT_END_READING_WRITING_LOOP(input_file,output_file,template);
  }
  // Clean
  gt_input_file_close(input_file);
  gt_output_file_close(output_file);
}
/*
 * Prints one global Levenshtein distance per line for every map in the
 * input. With --first-map, only the best (minimum) distance per template is
 * printed, and unmapped templates produce no line.
 */
GT_INLINE void gt_filter_print_error_distribution() {
  // Open input/output (stdin/stdout when no file names are given)
  gt_input_file* input_file = (parameters.name_input_file==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input);
  gt_output_file* output_file = (parameters.name_output_file==NULL) ?
      gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  // Parallel I/O
#ifdef HAVE_OPENMP
#pragma omp parallel num_threads(parameters.num_threads)
#endif
  {
    GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) {
      if (!parameters.first_map) {
        // Every map of the template gets its own line
        GT_TEMPLATE_ITERATE_(template,mmap) {
          const uint64_t distance = gt_map_get_global_levenshtein_distance(*mmap);
          gt_bofprintf(buffered_output,"%"PRIu64"\n",distance);
        }
      } else {
        // Only the minimum distance over all maps of the template
        uint64_t min_distance = UINT64_MAX;
        GT_TEMPLATE_ITERATE_(template,mmap) {
          const uint64_t distance = gt_map_get_global_levenshtein_distance(*mmap);
          if (distance < min_distance) min_distance = distance;
        }
        if (min_distance != UINT64_MAX) gt_bofprintf(buffered_output,"%"PRIu64"\n",min_distance);
      }
    } GT_END_READING_WRITING_LOOP(input_file,output_file,template);
  }
  // Clean
  gt_input_file_close(input_file);
  gt_output_file_close(output_file);
}
/*
* Handler for opening an archive (GEMIndex/MULTIFastaFile)
*/
/*
 * Opens the reference as a sequence archive: from a GEM index when
 * --gem-index was given, otherwise by parsing a MULTIFasta reference file.
 * @load_sequences controls whether the actual sequence data is loaded
 * (GEM-index path only). Fatal error on a malformed reference file.
 * The caller owns the returned archive.
 */
gt_sequence_archive* gt_filter_open_sequence_archive(const bool load_sequences) {
  gt_log("Loading reference file ...");
  gt_sequence_archive* archive;
  if (parameters.name_gem_index_file!=NULL) {
    // GEM index provided
    archive = gt_sequence_archive_new(GT_BED_ARCHIVE);
    gt_gemIdx_load_archive(parameters.name_gem_index_file,archive,load_sequences);
  } else {
    // Plain MULTIFasta reference
    archive = gt_sequence_archive_new(GT_CDNA_ARCHIVE);
    gt_input_file* const reference_file = gt_input_file_open(parameters.name_reference_file,false);
    if (gt_input_multifasta_parser_get_archive(reference_file,archive)!=GT_IFP_OK) {
      gt_fatal_error_msg("Error parsing reference file '%s'\n",parameters.name_reference_file);
    }
    gt_input_file_close(reference_file);
  }
  gt_log("Done.");
  return archive;
}
/*
 * Prints "<sequence-name>\t<total-length>" to stdout for every sequence in
 * the reference archive (opened without loading sequence data).
 * NOTE(review): the archive is not deleted before returning — presumably
 * acceptable for this one-shot listing mode; confirm.
 */
GT_INLINE void gt_filter_display_sequence_list(){
  gt_sequence_archive* const archive = gt_filter_open_sequence_archive(false);
  gt_sequence_archive_iterator archive_iterator;
  gt_sequence_archive_new_iterator(archive,&archive_iterator);
  gt_segmented_sequence* sequence = NULL;
  while ((sequence=gt_sequence_archive_iterator_next(&archive_iterator)) != NULL) {
    fprintf(stdout,"%s\t%"PRIu64"\n",sequence->seq_name->buffer,sequence->sequence_total_length);
  }
}
/*
* I/O Filtering Loop
*/
/*
 * Per-record parsing-error guard, expanded inside the main read loop of
 * gt_filter_read__write. Bumps the caller's 'record_num'; on a parsing error
 * ('error_code' != GT_IMP_OK) reports the offending input line and skips the
 * record — the 'continue' targets the caller's enclosing loop. Requires
 * 'record_num', 'error_code' and 'buffered_input' in the expansion scope.
 */
#define GT_FILTER_CHECK_PARSING_ERROR(FORMAT) \
  ++record_num; \
  if (error_code!=GT_IMP_OK) { \
    gt_error_msg("[#%"PRIu64"]Fatal error parsing "FORMAT"file '%s', line %"PRIu64"\n", \
        record_num,parameters.name_input_file,buffered_input->current_line_num-1); \
    continue; \
  }
/*
 * Main I/O filtering loop.
 * Opens the input (file or stdin) and, unless --no-output, the output stream(s)
 * (plus an optional stream for discarded records), loads the reference archive
 * and/or GTF annotation when a configured filter requires them, then reads
 * templates — in parallel under OpenMP when available — applying every filter
 * via gt_filter__print(). Finishes by printing the --check report and releasing
 * all resources.
 */
void gt_filter_read__write() {
  // Open file IN/OUT
  gt_input_file* input_file = (parameters.name_input_file==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input);
  // NOTE(review): 'dicarded_output_file' is a typo for 'discarded'; identifier
  // left unchanged here (documentation-only pass)
  gt_output_file* output_file, *dicarded_output_file;
  // Open out file (these pointers are only assigned/used when output is enabled)
  if (!parameters.no_output) {
    output_file = (parameters.name_output_file==NULL) ?
      gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
    if (parameters.discarded_output) {
      // "stdout"/"stderr" are accepted as magic names for the discarded stream
      if (gt_streq(parameters.name_discarded_output_file,"stdout")) {
        dicarded_output_file = gt_output_stream_new(stdout,SORTED_FILE);
      } else if (gt_streq(parameters.name_discarded_output_file,"stderr")) {
        dicarded_output_file = gt_output_stream_new(stderr,SORTED_FILE);
      } else {
        dicarded_output_file = gt_output_file_new(parameters.name_discarded_output_file,SORTED_FILE);
      }
    }
  }
  // Open reference file
  gt_sequence_archive* sequence_archive = NULL;
  if (parameters.load_index) {
    sequence_archive = gt_filter_open_sequence_archive(true);
  }
  // Read annotation if specified (required by the annotation-based filters)
  if (parameters.annotation != NULL && parameters.perform_annotation_filter) {
    parameters.gtf = gt_gtf_read_from_file(parameters.annotation, parameters.num_threads);
  }
  // Parallel reading+process (check counters are summed across threads)
  uint64_t total_algs_checked=0, total_algs_correct=0, total_maps_checked=0, total_maps_correct=0;
#ifdef HAVE_OPENMP
  #pragma omp parallel num_threads(parameters.num_threads) reduction(+:total_algs_checked,total_algs_correct,total_maps_checked,total_maps_correct)
#endif
  {
    // Prepare IN/OUT buffers & printers (one buffered set per thread)
    gt_status error_code;
    gt_buffered_input_file* buffered_input = gt_buffered_input_file_new(input_file);
    gt_buffered_output_file *buffered_output = NULL, *buffered_discarded_output = NULL;
    if (!parameters.no_output) {
      buffered_output = gt_buffered_output_file_new(output_file);
      // Attaching the output buffer to the input buffer preserves record order
      gt_buffered_input_file_attach_buffered_output(buffered_input,buffered_output);
      if (parameters.discarded_output) {
        buffered_discarded_output = gt_buffered_output_file_new(dicarded_output_file);
        gt_buffered_input_file_attach_buffered_output(buffered_input,buffered_discarded_output);
      }
    }
    // Prepare IN/OUT parser/printer attributes
    gt_generic_printer_attributes *generic_printer_attributes=NULL, *discarded_output_attributes=NULL;
    if (parameters.output_format==FILE_FORMAT_UNKNOWN) parameters.output_format = input_file->file_format; // Select output format
    generic_printer_attributes = gt_generic_printer_attributes_new(parameters.output_format);
    if (parameters.discarded_output) {
      // Discarded stream defaults to the input format unless explicitly overridden
      gt_file_format output_format = input_file->file_format;
      if (parameters.discarded_output_format!=FILE_FORMAT_UNKNOWN) output_format=parameters.discarded_output_format;
      discarded_output_attributes = gt_generic_printer_attributes_new(output_format);
    }
    /*
     * READ + PROCESS Loop (parser chosen by --check-format, else auto/generic)
     */
    uint64_t record_num = 0;
    gt_template* template = gt_template_new();
    if (parameters.check_format && parameters.check_file_format==FASTA) {
      /*
       * FASTA I/O loop
       */
      while ((error_code=gt_input_fasta_parser_get_template(buffered_input,template,parameters.paired_end))) {
        GT_FILTER_CHECK_PARSING_ERROR("FASTA ");
        // Apply all filters and print
        gt_filter__print(input_file->file_format,buffered_input->current_line_num-1,sequence_archive,template,
            &total_algs_checked,&total_algs_correct,&total_maps_checked,&total_maps_correct,
            buffered_output,generic_printer_attributes,buffered_discarded_output,discarded_output_attributes);
      }
    } else if (parameters.check_format && parameters.check_file_format==MAP) {
      /*
       * MAP I/O loop
       */
      gt_map_parser_attributes* const attr = gt_input_map_parser_attributes_new(parameters.paired_end);
      while ((error_code=gt_input_map_parser_get_template(buffered_input,template,attr))) {
        GT_FILTER_CHECK_PARSING_ERROR("MAP ");
        // Apply all filters and print
        gt_filter__print(input_file->file_format,buffered_input->current_line_num-1,sequence_archive,template,
            &total_algs_checked,&total_algs_correct,&total_maps_checked,&total_maps_correct,
            buffered_output,generic_printer_attributes,buffered_discarded_output,discarded_output_attributes);
      }
      gt_input_map_parser_attributes_delete(attr);
    } else if (parameters.check_format && parameters.check_file_format==SAM) {
      /*
       * SAM I/O loop
       */
      gt_sam_parser_attributes* const attr = gt_input_sam_parser_attributes_new();
      while ((error_code=gt_input_sam_parser_get_template(buffered_input,template,attr))) {
        GT_FILTER_CHECK_PARSING_ERROR("SAM ");
        // Apply all filters and print
        gt_filter__print(input_file->file_format,buffered_input->current_line_num-1,sequence_archive,template,
            &total_algs_checked,&total_algs_correct,&total_maps_checked,&total_maps_correct,
            buffered_output,generic_printer_attributes,buffered_discarded_output,discarded_output_attributes);
      }
      gt_input_sam_parser_attributes_delete(attr);
    } else {
      /*
       * Generic I/O loop
       */
      gt_generic_parser_attributes* generic_parser_attributes = gt_input_generic_parser_attributes_new(parameters.paired_end);
      gt_input_map_parser_attributes_set_max_parsed_maps(generic_parser_attributes->map_parser_attributes,parameters.max_input_matches); // Limit max-matches
      while ((error_code=gt_input_generic_parser_get_template(buffered_input,template,generic_parser_attributes))) {
        GT_FILTER_CHECK_PARSING_ERROR("");
        // Apply all filters and print
        gt_filter__print(input_file->file_format,buffered_input->current_line_num-1,sequence_archive,template,
            &total_algs_checked,&total_algs_correct,&total_maps_checked,&total_maps_correct,
            buffered_output,generic_printer_attributes,buffered_discarded_output,discarded_output_attributes);
      }
      gt_input_generic_parser_attributes_delete(generic_parser_attributes);
    }
    // Clean
    gt_template_delete(template);
    gt_buffered_input_file_close(buffered_input);
    gt_generic_printer_attributes_delete(generic_printer_attributes);
    if (!parameters.no_output) {
      gt_buffered_output_file_close(buffered_output);
      if (parameters.discarded_output) gt_buffered_output_file_close(buffered_discarded_output);
    }
  }
  /*
   * Print check report
   */
  if (parameters.check) {
    gt_log("Checked %lu alignments. Total.Correct %lu (%2.3f %%). Total.Maps.Correct %lu (%2.3f %%)",
        total_algs_checked,total_algs_correct,GT_GET_PERCENTAGE(total_algs_correct,total_algs_checked),
        total_maps_correct,GT_GET_PERCENTAGE(total_maps_correct,total_maps_checked));
  }
  // Release archive & Clean
  if (sequence_archive) gt_sequence_archive_delete(sequence_archive);
  gt_filter_delete_map_ids(parameters.map_ids);
  if (parameters.quality_score_ranges!=NULL) gt_vector_delete(parameters.quality_score_ranges);
  gt_input_file_close(input_file);
  if (!parameters.no_output) {
    gt_output_file_close(output_file);
    if (parameters.discarded_output) gt_output_file_close(dicarded_output_file);
  }
}
/*
* Argument Parsing
*/
/*
 * Parses up to @num_params comma-separated integer values from @parameters_list
 * into the uint64_t* targets supplied as varargs (in order). @parameters_list is
 * modified in place (strtok).
 * BUGFIX: the parsed-parameter counter was never incremented, so when the list
 * contained more fields than @num_params the loop kept calling va_arg() past
 * the supplied pointers (undefined behavior). Now mirrors the float variant.
 */
void gt_filter_get_coma_separated_arguments_long(char* const parameters_list,const uint64_t num_params,...) {
  uint64_t num_params_parsed = 0;
  // Start va_args
  va_list v_args;
  va_start(v_args,num_params);
  // Start parsing
  char *opt = strtok(parameters_list,",");
  while (opt!=NULL && num_params_parsed<num_params) {
    uint64_t* const uint64_arg = va_arg(v_args,uint64_t*);
    *uint64_arg = atoll(opt);
    opt = strtok(NULL,",");
    ++num_params_parsed; // count consumed varargs so we never read past num_params
  }
  // End va_args
  va_end(v_args);
}
/*
 * Parses up to @num_params comma-separated float values from @parameters_list
 * into the float* targets supplied as varargs (in order). @parameters_list is
 * modified in place (strtok).
 *   @return number of values actually parsed and stored
 */
GT_INLINE uint64_t gt_filter_get_coma_separated_arguments_float(char* const parameters_list,const uint64_t num_params,...) {
  va_list arg_ptr;
  va_start(arg_ptr,num_params);
  uint64_t parsed = 0;
  // Walk the comma-separated tokens, storing each into the next vararg target
  char* token = strtok(parameters_list,",");
  for (;token!=NULL && parsed<num_params;++parsed) {
    *(va_arg(arg_ptr,float*)) = atof(token);
    token = strtok(NULL,",");
  }
  va_end(arg_ptr);
  return parsed;
}
/*
 * Parses the --discarded-output argument: "<file>[,<FASTA|MAP|SAM>]".
 * Stores the discarded-output file name (the names "stdout"/"stderr" are
 * interpreted later by the caller) and, when given, its explicit output format.
 * Fatal error on an unrecognized format name.
 */
void gt_filter_get_discarded_output_arguments(char* const optarg) {
  // First field: the discarded-output file name
  char* field = strtok(optarg,",");
  parameters.name_discarded_output_file = field;
  // Optional second field: explicit output format
  field = strtok(NULL,",");
  if (field==NULL) return;
  if (gt_streq(field,"FASTA")) {
    parameters.discarded_output_format = FASTA;
  } else if (gt_streq(field,"MAP")) {
    parameters.discarded_output_format = MAP;
  } else if (gt_streq(field,"SAM")) {
    parameters.discarded_output_format = SAM;
  } else {
    gt_fatal_error_msg("Output format '%s' not recognized",field);
  }
}
/*
 * Parses the --pair-strandness argument: a comma-separated list drawn from
 * {FR,RF,FF,RR}. Enables each listed pair orientation and switches on the
 * paired-end strand filter. Fatal error on any unknown token.
 */
void gt_filter_get_argument_pair_strandness(char* const strandness_opt) {
  for (char* token = strtok(strandness_opt,",");token!=NULL;token = strtok(NULL,",")) {
    if (gt_streq(token,"FR")) {
      parameters.allow_strand_fr = true;
    } else if (gt_streq(token,"RF")) {
      parameters.allow_strand_rf = true;
    } else if (gt_streq(token,"FF")) {
      parameters.allow_strand_ff = true;
    } else if (gt_streq(token,"RR")) {
      parameters.allow_strand_rr = true;
    } else {
      gt_fatal_error_msg("Strandedness option not recognized '%s'\n",token);
    }
  }
  parameters.filter_by_strand_pe = true;
}
/*
 * Parses the --map-id argument: comma-separated sequence names. Each name is
 * copied into a fresh gt_string and collected in parameters.map_ids.
 */
void gt_filter_get_argument_map_id(char* const maps_ids) {
  // Vector of gt_string* holding the accepted map ids (sequence names)
  parameters.map_ids = gt_vector_new(20,sizeof(gt_string*));
  char* token = strtok(maps_ids,",");
  while (token!=NULL) {
    // Copy the token into an owned string and append it
    gt_string* const map_id = gt_string_new(0);
    gt_string_set_string(map_id,token);
    gt_vector_insert(parameters.map_ids,map_id,gt_string*);
    token = strtok(NULL,",");
  }
}
/*
 * Parses the GTF-type argument: comma-separated annotation type names. Each
 * name is registered (with a true flag) in the parameters.gtf_types hash.
 */
void gt_filter_get_argument_gtf_type(char* const maps_ids) {
  parameters.gtf_types = gt_shash_new();
  // Register every comma-separated type name
  for (char* token = strtok(maps_ids,",");token!=NULL;token = strtok(NULL,",")) {
    gt_shash_insert(parameters.gtf_types,token,true,bool);
  }
}
/*
 * Parses command-line options (via getopt_long, option tables generated from
 * gt_filter_options) into the global 'parameters' struct, then validates that a
 * reference (FASTA file or GEM index) was supplied whenever an option requires
 * loading the index. Exits with a fatal error on unrecognized options.
 */
void parse_arguments(int argc,char** argv) {
  struct option* gt_filter_getopt = gt_options_adaptor_getopt(gt_filter_options);
  gt_string* const gt_filter_short_getopt = gt_options_adaptor_getopt_short(gt_filter_options);
  int option, option_index;
  while (true) {
    // Get option & Select case
    if ((option=getopt_long(argc,argv,
        gt_string_get_string(gt_filter_short_getopt),gt_filter_getopt,&option_index))==-1) break;
    switch (option) {
    /* I/O */
    case 'i':
      parameters.name_input_file = optarg;
      break;
    case 'o':
      parameters.name_output_file = optarg;
      // "-o null" is shorthand for suppressing output entirely
      if (gt_streq(optarg,"null")) parameters.no_output = true;
      break;
    case 'r':
      parameters.name_reference_file = optarg;
      break;
    case 'I':
      parameters.name_gem_index_file = optarg;
      break;
    case 200: // annotation
      parameters.annotation = optarg;
      break;
    case 201:
      parameters.mmap_input = true;
      break;
    case 'p':
      parameters.paired_end = true;
      break;
    case 202: // output-format
      if (gt_streq(optarg,"FASTA")) {
        parameters.output_format = FASTA;
      } else if (gt_streq(optarg,"MAP")) {
        parameters.output_format = MAP;
      } else if (gt_streq(optarg,"SAM")) {
        parameters.output_format = SAM;
      } else {
        gt_fatal_error_msg("Output format '%s' not recognized",optarg);
      }
      break;
    case 203: // discarded-output
      parameters.discarded_output = true;
      gt_filter_get_discarded_output_arguments(optarg);
      break;
    case 204: // no-output
      parameters.no_output = true;
      break;
    case 205: // check-duplicates
      parameters.check_duplicates = true;
      break;
    /* Filter Read/Qualities */
    case 300: // hard-trim
      parameters.hard_trim = true;
      gt_filter_get_coma_separated_arguments_long(optarg,2,&(parameters.left_trim),&(parameters.right_trim));
      break;
    case 301: // quality-trim
      gt_fatal_error(NOT_IMPLEMENTED);
      break;
    case 302: // restore-trim
      parameters.restore_trim = true;
      break;
    case 303: // uniform-read
      parameters.uniform_read = true;
      if (optarg && gt_streq(optarg,"strict")) parameters.uniform_read_strict = true;
      break;
    case 304: // qualities-to-offset-33
      parameters.qualities_to_offset_33 = true;
      break;
    case 305: // qualities-to-offset-64
      parameters.qualities_to_offset_64 = true;
      break;
    case 306: // remove-qualities
      parameters.remove_qualities = true;
      break;
    case 307: // add-qualities
      parameters.add_qualities = true;
      break;
    /* Filter Template/Alignments */
    case 400:
      parameters.mapped = true;
      break;
    case 401:
      parameters.unmapped = true;
      break;
    case 402:
      parameters.unique_level = atoll(optarg);
      break;
    case 403:
      parameters.min_length = atof(optarg);
      break;
    case 404:
      parameters.max_length = atof(optarg);
      break;
    case 405:
      parameters.min_maps = atof(optarg);
      break;
    case 406:
      parameters.max_maps = atof(optarg);
      break;
    /* Filter Maps */
    case 500: // first-map
      parameters.perform_dna_map_filter = true;
      parameters.first_map = true;
      break;
    case 'k': // keep-first-map
      parameters.keep_first_map = true;
      break;
    case 'u': // keep-unique
      parameters.keep_unique = true;
      break;
    case 'd': // max-decoded-matches
      parameters.matches_pruning = true;
      parameters.max_decoded_matches = atoll(optarg);
      break;
    case 'D': // min-decoded-strata
      parameters.matches_pruning = true;
      parameters.min_decoded_strata = atoll(optarg);
      break;
    case 501: // max-output-matches
      parameters.matches_pruning = true;
      parameters.max_output_matches = atoll(optarg);
      break;
    case 502: // max-input-matches
      parameters.max_input_matches = atoll(optarg);
      break;
    case 503: // max-strata-after-map
      parameters.perform_dna_map_filter = true;
      parameters.max_strata_after_map = atof(optarg);
      break;
    case 504: // make-counters
      parameters.make_counters = true;
      break;
    case 505: // min-strata
      parameters.perform_dna_map_filter = true;
      parameters.min_event_distance = atof(optarg);
      break;
    case 506: // max-strata
      parameters.perform_dna_map_filter = true;
      parameters.max_event_distance = atof(optarg);
      break;
    case 507: // min-levenshtein-error
      parameters.perform_dna_map_filter = true;
      parameters.min_levenshtein_distance = atof(optarg);
      break;
    case 508: // max-levenshtein-error
      parameters.perform_dna_map_filter = true;
      parameters.max_levenshtein_distance = atof(optarg);
      break;
    case 509: // map-id
      parameters.perform_dna_map_filter = true;
      gt_filter_get_argument_map_id(optarg);
      break;
    case 510: // strandedness
      parameters.perform_dna_map_filter = true;
      parameters.filter_by_strand_se = true;
      if (gt_streq(optarg,"F")) {
        parameters.allow_strand_f = true;
      } else if (gt_streq(optarg,"R")) {
        parameters.allow_strand_r = true;
      } else {
        gt_fatal_error_msg("Strand '%s' not recognized {'F','R'}",optarg);
      }
      break;
    case 511: // filter-quality
      parameters.perform_dna_map_filter = true;
      gt_filter_quality_range qrange;
      gt_filter_get_coma_separated_arguments_long(optarg,2,&(qrange.min),&(qrange.max));
      // Add it to the vector of ranges (created lazily on first use)
      if (parameters.quality_score_ranges==NULL) {
        parameters.quality_score_ranges = gt_vector_new(4,sizeof(gt_filter_quality_range));
      }
      gt_vector_insert(parameters.quality_score_ranges,qrange,gt_filter_quality_range);
      break;
    case 512: // reduce-to-level
      parameters.perform_dna_map_filter = true;
      parameters.reduce_to_unique_strata = atol(optarg);
      break;
    case 513: // reduce-by-quality
      parameters.perform_dna_map_filter = true;
      parameters.reduce_by_quality = atol(optarg);
      break;
    case 514: // reduce-by-annotation
      parameters.reduce_by_gene_id = true;
      parameters.perform_annotation_filter = true;
      break;
    case 515: // reduce-to-unique
      parameters.reduce_to_unique = atol(optarg);
      break;
    case 516: // reduce-to-pairs
      parameters.reduce_to_pairs = true;
      break;
    case 517: // reduce-to-protein-coding
      parameters.reduce_to_protein_coding = true;
      parameters.perform_annotation_filter = true;
      break;
    case 518: // reduce-by_junctions
      parameters.reduce_by_junctions = true;
      parameters.perform_annotation_filter = true;
      break;
    /* Filter RNA-Maps */
    case 600: // no-split-maps
      parameters.no_split_maps = true;
      parameters.perform_rna_map_filter = true;
      break;
    case 601: // only-split-maps
      parameters.only_split_maps = true;
      parameters.perform_rna_map_filter = true;
      break;
    case 's': // no-penalty-for-splitmaps
      parameters.no_penalty_for_splitmaps = true;
      break;
    case 603: // min-intron-length
      parameters.min_intron_length = atol(optarg);
      parameters.perform_rna_map_filter = true;
      break;
    case 604: // min-block-length
      parameters.min_block_length = atol(optarg);
      parameters.perform_rna_map_filter = true;
      break;
    /* Filter PE-Maps */
    case 700: // pair-strandness
      parameters.perform_dna_map_filter = true;
      gt_filter_get_argument_pair_strandness(optarg);
      break;
    case 701: // min-inss
      parameters.perform_dna_map_filter = true;
      parameters.min_inss = atoll(optarg);
      break;
    case 702: // max-inss
      parameters.perform_dna_map_filter = true;
      parameters.max_inss = atoll(optarg);
      break;
    /* Realign/Check */
    case 800: // mismatch-recovery
      parameters.load_index = true;
      parameters.mismatch_recovery = true;
      break;
    case 801: // hamming-realign
      parameters.load_index = true;
      parameters.realign_hamming = true;
      break;
    case 802: // levenshtein-realign
      parameters.load_index = true;
      parameters.realign_levenshtein = true;
      break;
    /* Checking/Report */
    case 'c': // check
      parameters.load_index = true;
      parameters.check = true;
      break;
    case 'C': // check-only
      parameters.load_index = true;
      parameters.check = true;
      parameters.no_output = true;
      break;
    case 803: // check-format
      parameters.check_format = true;
      if (gt_streq(optarg,"FASTA")) {
        parameters.check_file_format = FASTA;
      } else if (gt_streq(optarg,"MAP")) {
        parameters.check_file_format = MAP;
      } else if (gt_streq(optarg,"SAM")) {
        parameters.check_file_format = SAM;
      } else {
        gt_fatal_error_msg("Check format '%s' not recognized",optarg);
      }
      break;
    /* Split/Grouping */
    case 900: // split-read
      gt_fatal_error(NOT_IMPLEMENTED);
      break;
    case 901: // sample-read
      parameters.special_functionality = true;
      parameters.sample_read = true;
      // NOTE(review): sample-read reuses the split-read parameter set and the
      // error text below mentions --split-read — confirm this is intended
      gt_cond_fatal_error_msg(gt_filter_get_coma_separated_arguments_float(optarg,5,
          &(parameters.split_chunk_size),&(parameters.split_step_size),
          &(parameters.split_left_trim),&(parameters.split_right_trim),
          &(parameters.split_min_remainder))<4,
          "Too few parameters provided to option --split-read");
      break;
    case 902: // group-read-chunks
      parameters.special_functionality = true;
      parameters.group_reads = true;
      break;
    /* Display/Information */
    case 1000:
      parameters.special_functionality = true;
      parameters.error_plot = true;
      break;
    case 1001:
      parameters.special_functionality = true;
      parameters.insert_size_plot = true;
      break;
    case 1002:
      parameters.special_functionality = true;
      parameters.load_index = true;
      parameters.show_sequence_list = true;
      break;
    case 1003:
      parameters.special_functionality = true;
      parameters.load_index = true;
      parameters.display_pretty = true;
      break;
    /* Misc */
    case 't': // threads
      // Thread count only takes effect when built with OpenMP; the sanity
      // check below runs regardless
#ifdef HAVE_OPENMP
      parameters.num_threads = atol(optarg);
#endif
      gt_cond_fatal_error_msg(parameters.num_threads > GT_MAX_OUTPUT_BUFFERS,
          "Excessive number of threads (maximum %"PRId32")",GT_MAX_OUTPUT_BUFFERS);
      break;
    case 'v': // verbose
      parameters.verbose = true;
      break;
    case 'h': // help
      fprintf(stderr, "USE: ./gt.filter [ARGS]...\n");
      gt_options_fprint_menu(stderr,gt_filter_options,gt_filter_groups,false,false);
      exit(1);
    case 'H': // full-help
      fprintf(stderr, "USE: ./gt.filter [ARGS]...\n");
      gt_options_fprint_menu(stderr,gt_filter_options,gt_filter_groups,false,true);
      exit(1);
    case 'J':
      gt_options_fprint_json_menu(stderr,gt_filter_options,gt_filter_groups,true,false);
      exit(1);
      // (unreachable: exit() above)
      break;
    case '?':
    default:
      gt_fatal_error_msg("Option not recognized");
    }
  }
  /*
   * Parameters check
   */
  if (parameters.load_index && parameters.name_reference_file==NULL && parameters.name_gem_index_file==NULL) {
    gt_fatal_error_msg("Reference file required");
  }
  // Free
  // NOTE(review): the getopt table 'gt_filter_getopt' does not appear to be
  // freed here — confirm ownership with gt_options_adaptor_getopt
  gt_string_delete(gt_filter_short_getopt);
}
/*
* Main
*/
/*
 * Entry point: installs GT error handlers, parses CLI arguments, and dispatches
 * to the selected functionality (sequence listing, read grouping/sampling,
 * deprecated distribution plots, or the main filtering loop).
 */
int main(int argc,char** argv) {
  // GT error handler
  gt_handle_error_signals();
  // Parsing command-line options
  parse_arguments(argc,argv);
  /*
   * Select functionality
   */
  if (parameters.show_sequence_list) {
    gt_filter_display_sequence_list();
  } else if (parameters.group_reads) {
    gt_filter_group_reads();
  } else if (parameters.sample_read) {
    gt_filter_sample_read();
  } else if (parameters.error_plot) {
    // FIX: error_plot/insert_size_plot previously invoked each other's printer
    // (the two calls were swapped). Deprecated.
    gt_filter_print_error_distribution();
  } else if (parameters.insert_size_plot) {
    gt_filter_print_insert_size_distribution();
    // Deprecated
  } else {
    gt_filter_read__write(); // Filter !!
  }
  return 0;
}
|
GB_binop__div_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__div_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__div_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__div_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_int32)
// A*D function (colscale): GB (_AxD__div_int32)
// D*A function (rowscale): GB (_DxB__div_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__div_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__div_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_int32)
// C=scalar+B GB (_bind1st__div_int32)
// C=scalar+B' GB (_bind1st_tran__div_int32)
// C=A+scalar GB (_bind2nd__div_int32)
// C=A'+scalar GB (_bind2nd_tran__div_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (x, y, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT32 || GxB_NO_DIV_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__div_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__div_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__div_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__div_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__div_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__div_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__div_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__div_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__div_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__div_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__div_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__div_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_SIGNED (x, bij, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__div_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_SIGNED (aij, y, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (x, aij, 32) ; \
}
GrB_Info GB (_bind1st_tran__div_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bound scalar x; the included transpose template applies GB_CAST_OP
// (defined above) to each entry of A while transposing into C
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any later template includes in this file
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name).
// GB_CAST_OP is consumed by the #include'd GB_unop_transpose.c template in
// the bind2nd transpose kernel below; "y" is the scalar bound as the second
// operand of the div operator.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 32) ; \
}
// C = op (A', y): transpose and apply, with y bound as the 2nd operand.
// Per-entry work is done by GB_CAST_OP (defined just above) inside the
// included GB_unop_transpose.c template.
GrB_Info GB (_bind2nd_tran__div_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fci_contract_nosym.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*
* Paticle permutation symmetry for 2e Hamiltonian only
* h2e[i,j,k,l] == h2e[k,l,i,j]
* h2e[i,j,k,l] =/= h2e[j,i,k,l] =/= h2e[i,j,l,k] ...
*/
#include <stdlib.h>
#include <string.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#include "fci.h"
#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define CSUMTHR 1e-28
#define STRB_BLKSIZE 112
double FCI_t1ci_sf(double *ci0, double *t1, int bcount,
int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb);
/*
 * One-electron contraction over alpha-string single excitations:
 * for every link (a, i, str1, sign) of alpha string str0, accumulate
 *     ci1[str1, :] += sign * h1e[a,i] * ci0[str0, :]
 * i.e. each link contributes a full row-axpy of length nstrb.
 *
 * h1e         : norb x norb one-electron integrals (row-major)
 * ci0, ci1    : CI vectors of shape (nstra, nstrb), row-major
 * link_indexa : uncompressed alpha link table (nstra x nlinka entries)
 * nlinkb / link_indexb are unused here; kept for a uniform signature.
 */
void FCIcontract_a_1e_nosym(double *h1e, double *ci0, double *ci1,
                            int norb, int nstra, int nstrb, int nlinka, int nlinkb,
                            int *link_indexa, int *link_indexb)
{
    int j, k, i, a, sign;
    size_t str0, str1;
    double *pci0, *pci1;
    double tmp;
    _LinkT *tab;
    /* NOTE(review): malloc result unchecked, consistent with this file */
    _LinkT *clink = malloc(sizeof(_LinkT) * nlinka * nstra);
    FCIcompress_link(clink, link_indexa, norb, nstra, nlinka);

    for (str0 = 0; str0 < nstra; str0++) {
        tab = clink + str0 * nlinka;
        pci0 = ci0 + str0 * nstrb;  /* hoisted: invariant over the link loop */
        for (j = 0; j < nlinka; j++) {
            a = EXTRACT_CRE (tab[j]); // propagate from t1 to bra, through a^+ i
            i = EXTRACT_DES (tab[j]);
            str1 = EXTRACT_ADDR(tab[j]);
            sign = EXTRACT_SIGN(tab[j]);
            pci1 = ci1 + str1 * nstrb;
            tmp = sign * h1e[a*norb+i];
            for (k = 0; k < nstrb; k++) {
                pci1[k] += tmp * pci0[k];
            }
        }
    }
    free(clink);
}
/*
 * One-electron contraction over beta-string single excitations: for each
 * alpha row ia, scatter ci0[ia, ib] into ci1[ia, addr] with weight
 * sign * h1e[cre, des], following the compressed beta link table.
 * nlinka / link_indexa are accepted but not used here.
 */
void FCIcontract_b_1e_nosym(double *h1e, double *ci0, double *ci1,
                            int norb, int nstra, int nstrb, int nlinka, int nlinkb,
                            int *link_indexa, int *link_indexb)
{
    int lk, ib, cre, des, sgn;
    size_t ia, addr;
    double coef;
    double *row1;
    _LinkT *plink;
    _LinkT *clink = malloc(sizeof(_LinkT) * nlinkb * nstrb);
    FCIcompress_link(clink, link_indexb, norb, nstrb, nlinkb);

    for (ia = 0; ia < nstra; ia++) {
        row1 = ci1 + ia * nstrb;
        for (ib = 0; ib < nstrb; ib++) {
            plink = clink + ib * nlinkb;
            coef = ci0[ia*nstrb+ib];
            for (lk = 0; lk < nlinkb; lk++) {
                cre  = EXTRACT_CRE (plink[lk]);
                des  = EXTRACT_DES (plink[lk]);
                addr = EXTRACT_ADDR(plink[lk]);
                sgn  = EXTRACT_SIGN(plink[lk]);
                row1[addr] += sgn * coef * h1e[cre*norb+des];
            }
        }
    }
    free(clink);
}
/*
 * Scatter the intermediate t1 into ci1 along alpha-string excitations.
 * For each link (cre, des, addr, sign) of alpha string stra_id:
 *     ci1[addr, strb_id + k] += sign * t1[k, cre*norb + des]
 * for k = 0 .. bcount-1, where t1 rows have length norb*norb.
 */
static void spread_a_t1(double *ci1, double *t1,
                        int bcount, int stra_id, int strb_id,
                        int norb, int nstrb, int nlinka, _LinkT *clink_indexa)
{
    const int nnorb = norb * norb;
    const _LinkT *tab = clink_indexa + stra_id * nlinka;
    const double *src;
    double *dst;
    int lk, kb;

    ci1 += strb_id;
    for (lk = 0; lk < nlinka; lk++) {
        int cre  = EXTRACT_CRE (tab[lk]);
        int des  = EXTRACT_DES (tab[lk]);
        int addr = EXTRACT_ADDR(tab[lk]);
        int sgn  = EXTRACT_SIGN(tab[lk]);
        src = t1 + cre*norb+des;  // propagate from t1 to bra, through a^+ i
        dst = ci1 + addr*(size_t)nstrb;
        if (sgn > 0) {
            for (kb = 0; kb < bcount; kb++) {
                dst[kb] += src[kb*nnorb];
            }
        } else {
            for (kb = 0; kb < bcount; kb++) {
                dst[kb] -= src[kb*nnorb];
            }
        }
    }
}
/*
 * Scatter t1 into one alpha row of ci1 along beta-string excitations:
 * for each of the bcount consecutive beta strings starting at strb_id,
 *     ci1[stra_id, addr] += sign * t1[b, cre*norb + des]
 * where t1 advances by norb*norb per beta string.
 */
static void spread_b_t1(double *ci1, double *t1,
                        int bcount, int stra_id, int strb_id,
                        int norb, int nstrb, int nlinkb, _LinkT *clink_indexb)
{
    const int nnorb = norb * norb;
    const _LinkT *tab = clink_indexb + strb_id * nlinkb;
    double *pci = ci1 + stra_id * (size_t)nstrb;
    int ib, lk;

    for (ib = 0; ib < bcount; ib++) {
        for (lk = 0; lk < nlinkb; lk++) {
            int cre  = EXTRACT_CRE (tab[lk]);
            int des  = EXTRACT_DES (tab[lk]);
            int addr = EXTRACT_ADDR(tab[lk]);
            int sgn  = EXTRACT_SIGN(tab[lk]);
            // propagate from t1 to bra, through a^+ i
            pci[addr] += sgn * t1[cre*norb+des];
        }
        t1  += nnorb;
        tab += nlinkb;
    }
}
/*
 * Two-electron contraction for one block: alpha string stra_id and the
 * bcount beta strings starting at strb_id.
 *   t1  = single-excitation intermediate built by FCI_t1ci_sf
 *   vt1 = eri . t1   (dgemm: (nnorb x nnorb) * (nnorb x bcount))
 * vt1 is then spread into ci1 via beta links and into the per-thread
 * buffer ci1buf via alpha links.  Blocks whose t1 magnitude estimate
 * (csum) is below CSUMTHR are skipped entirely.
 */
static void ctr_rhf2e_kern(double *eri, double *ci0, double *ci1,
                           double *ci1buf, double *t1buf,
                           int bcount_for_spread_a, int ncol_ci1buf,
                           int bcount, int stra_id, int strb_id,
                           int norb, int na, int nb, int nlinka, int nlinkb,
                           _LinkT *clink_indexa, _LinkT *clink_indexb)
{
    const char TRANS_N = 'N';
    const double D0 = 0;
    const double D1 = 1;
    const int nnorb = norb * norb;
    double *t1 = t1buf;
    double *vt1 = t1buf + nnorb*bcount;
    double csum = FCI_t1ci_sf(ci0, t1, bcount, stra_id, strb_id,
                              norb, na, nb, nlinka, nlinkb,
                              clink_indexa, clink_indexb);
    if (csum <= CSUMTHR) {
        return;  // negligible block: nothing to accumulate
    }
    dgemm_(&TRANS_N, &TRANS_N, &nnorb, &bcount, &nnorb,
           &D1, eri, &nnorb, t1, &nnorb,
           &D0, vt1, &nnorb);
    spread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                norb, nb, nlinkb, clink_indexb);
    spread_a_t1(ci1buf, vt1, bcount_for_spread_a, stra_id, 0,
                norb, ncol_ci1buf, nlinka, clink_indexa);
}
/*
 * Accumulate a dense (count x ni) buffer into the leading ni columns of a
 * (count x no) array:  out[i*no + j] += in[i*ni + j]  for j < ni.
 * Columns ni..no-1 of out are left untouched.
 */
static void axpy2d(double *out, double *in, int count, int no, int ni)
{
    double *dst = out;
    double *src = in;
    int row, col;
    for (row = 0; row < count; row++) {
        for (col = 0; col < ni; col++) {
            dst[col] += src[col];
        }
        dst += no;
        src += ni;
    }
}
/*
 * Two-electron contraction ci1 = contract(eri, ci0) assuming only the
 * particle-permutation symmetry h2e[i,j,k,l] == h2e[k,l,i,j] (see file
 * header).  Beta strings are processed in blocks of STRB_BLKSIZE; the
 * alpha-link scatter goes to a per-thread buffer (ci1buf) that is merged
 * into ci1 under a critical section.
 * NOTE(review): buffer sizes na*STRB_BLKSIZE and STRB_BLKSIZE*norb*norb*2
 * are computed in int and could overflow for very large na/norb --
 * verify sizes upstream.  malloc results are unchecked, consistent with
 * the rest of this file.
 */
void FCIcontract_2es1(double *eri, double *ci0, double *ci1,
int norb, int na, int nb, int nlinka, int nlinkb,
int *link_indexa, int *link_indexb)
{
_LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na);
_LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb);
FCIcompress_link(clinka, link_indexa, norb, na, nlinka);
FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb);
memset(ci1, 0, sizeof(double)*na*nb);
#pragma omp parallel default(none) \
shared(eri, ci0, ci1, norb, na, nb, nlinka, nlinkb, \
clinka, clinkb)
{
int strk, ib, blen;
/* per-thread scratch: t1/vt1 workspace and a private alpha-scatter buffer */
double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*norb*2+2));
double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
blen = MIN(STRB_BLKSIZE, nb-ib);
memset(ci1buf, 0, sizeof(double) * na*blen);
/* alpha strings distributed across threads; each thread writes only its
 * own rows of ci1 (beta scatter) and its private ci1buf (alpha scatter) */
#pragma omp for schedule(static)
for (strk = 0; strk < na; strk++) {
ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf,
blen, blen, blen, strk, ib,
norb, na, nb, nlinka, nlinkb,
clinka, clinkb);
}
/* serialize the merge of each thread's ci1buf into columns
 * [ib, ib+blen) of ci1 */
#pragma omp critical
axpy2d(ci1+ib, ci1buf, na, nb, blen);
/* keep threads in lockstep before starting the next beta block */
#pragma omp barrier
}
free(ci1buf);
free(t1buf);
}
free(clinka);
free(clinkb);
}
|
gbdt.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_
#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/threading.h>
#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>
#include "score_updater.hpp"
namespace LightGBM {
using json11::Json;
/*!
* \brief GBDT algorithm implementation. including Training, prediction, bagging.
*/
class GBDT : public GBDTBase {
public:
/*!
* \brief Constructor
*/
GBDT();
/*!
* \brief Destructor
*/
~GBDT();
/*!
* \brief Initialization logic
* \param gbdt_config Config for boosting
* \param train_data Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void Init(const Config* gbdt_config, const Dataset* train_data,
const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Merge model from other boosting object. Will insert to the front of current boosting object
* \param other
*/
void MergeFrom(const Boosting* other) override {
// NOTE(review): assumes *other is actually a GBDT; a static_cast (or a
// checked dynamic_cast) would be safer than reinterpret_cast here --
// confirm all callers pass GBDT instances.
auto other_gbdt = reinterpret_cast<const GBDT*>(other);
// tmp move to other vector
auto original_models = std::move(models_);
models_ = std::vector<std::unique_ptr<Tree>>();
// push model from other first
for (const auto& tree : other_gbdt->models_) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
// push model in current object
for (const auto& tree : original_models) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
}
// Shuffle the order of iterations in [start_iter, end_iter) with a
// fixed-seed Fisher-Yates pass; the num_tree_per_iteration_ trees of one
// iteration are kept together.
void ShuffleModels(int start_iter, int end_iter) override {
int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
start_iter = std::max(0, start_iter);
if (end_iter <= 0) {
end_iter = total_iter;
}
end_iter = std::min(total_iter, end_iter);
auto original_models = std::move(models_);
std::vector<int> indices(total_iter);
for (int i = 0; i < total_iter; ++i) {
indices[i] = i;
}
Random tmp_rand(17);
for (int i = start_iter; i < end_iter - 1; ++i) {
int j = tmp_rand.NextShort(i + 1, end_iter);
std::swap(indices[i], indices[j]);
}
models_ = std::vector<std::unique_ptr<Tree>>();
for (int i = 0; i < total_iter; ++i) {
for (int j = 0; j < num_tree_per_iteration_; ++j) {
int tree_idx = indices[i] * num_tree_per_iteration_ + j;
auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
models_.push_back(std::move(new_tree));
}
}
}
/*!
* \brief Reset the training data
* \param train_data New Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Reset Boosting Config
* \param gbdt_config Config for boosting
*/
void ResetConfig(const Config* gbdt_config) override;
/*!
* \brief Adding a validation dataset
* \param valid_data Validation dataset
* \param valid_metrics Metrics for validation dataset
*/
void AddValidDataset(const Dataset* valid_data,
const std::vector<const Metric*>& valid_metrics) override;
/*!
* \brief Perform a full training procedure
* \param snapshot_freq frequency of snapshot
* \param model_output_path path of model file
*/
void Train(int snapshot_freq, const std::string& model_output_path) override;
void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;
/*!
* \brief Training logic
* \param gradients nullptr for using default objective, otherwise use self-defined boosting
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
bool TrainOneIter_new(const score_t* gradients, const score_t* hessians,const score_t* gradients2, const score_t* hessians2) override;
bool TrainOneIter_old(const score_t* gradients, const score_t* hessians);
bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
/*!
* \brief Rollback one iteration
*/
void RollbackOneIter() override;
/*!
* \brief Get current iteration
*/
int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }
/*!
* \brief Can use early stopping for prediction or not
* \return True if cannot use early stopping for prediction
*/
bool NeedAccuratePrediction() const override {
if (objective_function_ == nullptr) {
return true;
} else {
return objective_function_->NeedAccuratePrediction();
}
}
/*!
* \brief Get evaluation result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return evaluation result
*/
std::vector<double> GetEvalAt(int data_idx) const override;
/*!
* \brief Get current training score
* \param out_len length of returned score
* \return training score
*/
const double* GetTrainingScore(int64_t* out_len) override;
/*!
* \brief Get size of prediction at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return The size of prediction
*/
int64_t GetNumPredictAt(int data_idx) const override {
// data_idx == 0 selects training data; 1..N select the N validation
// sets, so the upper bound of the CHECK is inclusive of size().
CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
data_size_t num_data = train_data_->num_data();
if (data_idx > 0) {
num_data = valid_score_updater_[data_idx - 1]->num_data();
}
return num_data * num_class_ * num_labels_;
}
/*!
* \brief Get prediction result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \param result used to store prediction result, should allocate memory before call this function
* \param out_len length of returned score
*/
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;
/*!
* \brief Get number of prediction for one data
* \param start_iteration Start index of the iteration to predict
* \param num_iteration number of used iterations
* \param is_pred_leaf True if predicting leaf index
* \param is_pred_contrib True if predicting feature contribution
* \return number of prediction
*/
inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
int num_pred_in_one_row = num_class_ * num_labels_;
if (is_pred_leaf) {
int max_iteration = GetCurrentIteration();
start_iteration = std::max(start_iteration, 0);
start_iteration = std::min(start_iteration, max_iteration);
if (num_iteration > 0) {
num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration));
} else {
num_pred_in_one_row *= (max_iteration - start_iteration);
}
} else if (is_pred_contrib) {
num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline
}
return num_pred_in_one_row;
}
void PredictRaw(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void Predict(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void PredictLeafIndex(const double* features, double* output) const override;
void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;
void PredictContrib(const double* features, double* output) const override;
void PredictContribByMap(const std::unordered_map<int, double>& features,
std::vector<std::unordered_map<int, double>>* output) const override;
/*!
* \brief Dump model to json format string
* \param start_iteration The model will be saved start from
* \param num_iteration Number of iterations that want to dump, -1 means dump all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Json format string of model
*/
std::string DumpModel(int start_iteration, int num_iteration,
int feature_importance_type) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \return if-else format codes of model
*/
std::string ModelToIfElse(int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToIfElse(int num_iteration, const char* filename) const override;
/*!
* \brief Save model to file
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToFile(int start_iteration, int num_iterations,
int feature_importance_type,
const char* filename) const override;
bool SaveModelToFile(int start_iteration, int num_iterations,
int feature_importance_type,
int num_labels,
int num_label, const char* filename) override;
bool SetNumlabels(int num_labels) override;
/*!
* \brief Save model to string
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Non-empty string if succeeded
*/
std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override;
/*!
* \brief Restore from a serialized buffer
*/
bool LoadModelFromString(const char* buffer, size_t len) override;
/*!
* \brief Calculate feature importances
* \param num_iteration Number of model that want to use for feature importance, -1 means use all
* \param importance_type: 0 for split, 1 for gain
* \return vector of feature_importance
*/
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;
/*!
* \brief Calculate upper bound value
* \return upper bound value
*/
double GetUpperBoundValue() const override;
/*!
* \brief Calculate lower bound value
* \return lower bound value
*/
double GetLowerBoundValue() const override;
/*!
* \brief Get max feature index of this model
* \return Max feature index of this model
*/
inline int MaxFeatureIdx() const override { return max_feature_idx_; }
/*!
* \brief Get feature names of this model
* \return Feature names of this model
*/
inline std::vector<std::string> FeatureNames() const override { return feature_names_; }
/*!
* \brief Get index of label column
* \return index of label column
*/
inline int LabelIdx() const override { return label_idx_; }
/*!
* \brief Get number of weak sub-models
* \return Number of weak sub-models
*/
inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }
/*!
* \brief Get number of tree per iteration
* \return number of tree per iteration
*/
inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }
/*!
* \brief Get number of classes
* \return Number of classes
*/
inline int NumberOfClasses() const override { return num_class_; }
// Clamp [start_iteration, start_iteration + num_iteration) to the trees
// actually present and cache the window for subsequent Predict* calls.
inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override {
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
start_iteration = std::max(start_iteration, 0);
start_iteration = std::min(start_iteration, num_iteration_for_pred_);
if (num_iteration > 0) {
num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration);
} else {
num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration;
}
start_iteration_for_pred_ = start_iteration;
if (is_pred_contrib) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
models_[i]->RecomputeMaxDepth();
}
}
}
inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
return models_[tree_idx]->LeafOutput(leaf_idx);
}
inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
models_[tree_idx]->SetLeafOutput(leaf_idx, val);
}
/*!
* \brief Get Type name of this boosting object
*/
const char* SubModelName() const override { return "tree"; }
bool IsLinear() const override { return linear_tree_; }
inline std::string ParserConfigStr() const override {return parser_config_str_;}
protected:
virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) {
if (objective_function != nullptr) {
return objective_function->IsConstantHessian();
} else {
return false;
}
}
/*!
* \brief Print eval result and check early stopping
*/
virtual bool EvalAndCheckEarlyStopping();
/*!
* \brief reset config for bagging
*/
void ResetBaggingConfig(const Config* config, bool is_change_dataset);
/*!
* \brief Implement bagging logic
* \param iter Current interation
*/
virtual void Bagging(int iter);
virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
/*!
* \brief calculate the object function
*/
virtual void Boosting();
/*!
* \brief updating score after tree was trained
* \param tree Trained tree of this iteration
* \param cur_tree_id Current tree for multiclass training
*/
virtual void UpdateScore(const Tree* tree, const int cur_tree_id);
/*!
* \brief eval results for one metric
*/
virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;
/*!
* \brief Print metric result of current iteration
* \param iter Current iteration
* \return best_msg if met early_stopping
*/
std::string OutputMetric(int iter);
double BoostFromAverage(int class_id, bool update_scorer);
/*! \brief current iteration */
int iter_;
/*! \brief Pointer to training data */
const Dataset* train_data_;
/*! \brief Config of gbdt */
std::unique_ptr<Config> config_;
/*! \brief Tree learner, will use this class to learn trees */
std::unique_ptr<TreeLearner> tree_learner_;
/*! \brief Objective function */
const ObjectiveFunction* objective_function_;
/*! \brief Store and update training data's score */
std::unique_ptr<ScoreUpdater> train_score_updater_;
/*! \brief Metrics for training data */
std::vector<const Metric*> training_metrics_;
/*! \brief Store and update validation data's scores */
std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
/*! \brief Metric for validation data */
std::vector<std::vector<const Metric*>> valid_metrics_;
/*! \brief Number of rounds for early stopping */
int early_stopping_round_;
/*! \brief Only use first metric for early stopping */
bool es_first_metric_only_;
/*! \brief Best iteration(s) for early stopping */
std::vector<std::vector<int>> best_iter_;
/*! \brief Best score(s) for early stopping */
std::vector<std::vector<double>> best_score_;
/*! \brief output message of best iteration */
std::vector<std::vector<std::string>> best_msg_;
/*! \brief Trained models(trees) */
std::vector<std::unique_ptr<Tree>> models_;
/*! \brief Max feature index of training data*/
int max_feature_idx_;
/*! \brief Parser config file content */
std::string parser_config_str_ = "";
#ifdef USE_CUDA
/*! \brief First order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> hessians_;
#else
/*! \brief First order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_;
#endif
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_;
/*! \brief Number of in-bag data */
data_size_t bag_data_cnt_;
/*! \brief Number of training data */
data_size_t num_data_;
/*! \brief Number of trees per iterations */
int num_tree_per_iteration_;
/*! \brief Number of class */
int num_class_;
/*! \brief Number of labels */
int num_labels_;
/*! \brief Index of label column */
data_size_t label_idx_;
/*! \brief number of used model */
int num_iteration_for_pred_;
/*! \brief Start iteration of used model */
int start_iteration_for_pred_;
/*! \brief Shrinkage rate for one iteration */
double shrinkage_rate_;
/*! \brief Number of loaded initial models */
int num_init_iteration_;
/*! \brief Feature names */
std::vector<std::string> feature_names_;
std::vector<std::string> feature_infos_;
std::unique_ptr<Dataset> tmp_subset_;
bool is_use_subset_;
std::vector<bool> class_need_train_;
bool is_constant_hessian_;
std::unique_ptr<ObjectiveFunction> loaded_objective_;
bool average_output_;
bool need_re_bagging_;
bool balanced_bagging_;
std::string loaded_parameter_;
std::vector<int8_t> monotone_constraints_;
const int bagging_rand_block_ = 1024;
std::vector<Random> bagging_rands_;
ParallelPartitionRunner<data_size_t, false> bagging_runner_;
Json forced_splits_json_;
bool linear_tree_;
/*! \brief -1 means "not set"; presumably updated via SetNumlabels /
 * the label-aware SaveModelToFile overload -- TODO confirm */
int save_num_label = -1;
};
} // namespace LightGBM
#endif // LightGBM_BOOSTING_GBDT_H_
|
par_relax_more.c |
/******************************************************************************
*
* a few more relaxation schemes: Chebychev, FCF-Jacobi, CG -
* these do not go through the CF interface (hypre_BoomerAMGRelaxIF)
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "float.h"
HYPRE_Int hypre_LINPACKcgtql1(HYPRE_Int*,HYPRE_Real *,HYPRE_Real *,HYPRE_Int *);
/******************************************************************************
*
*use max norm to estimate largest eigenvalue
*
*****************************************************************************/
/*
 * Estimate the largest eigenvalue of A (or of the diagonally scaled A if
 * scale != 0) using the global inf-norm of the matrix; adequate for SPD
 * matrices.  The sign heuristic flips the estimate when all diagonal
 * entries are negative.
 *
 * FIX: diag_value was read uninitialized when a local row was empty (and
 * for nonempty rows it silently reused the previous row's value before
 * the j==0 assignment).  It is now reset to 0.0 per row, so empty rows
 * simply skip the diagonal scaling.
 */
HYPRE_Int hypre_ParCSRMaxEigEstimate(hypre_ParCSRMatrix *A, /* matrix to relax with */
                                     HYPRE_Int scale, /* scale by diagonal?*/
                                     HYPRE_Real *max_eig)
{
   HYPRE_Real e_max;
   HYPRE_Real row_sum, max_norm;
   HYPRE_Real *col_val;
   HYPRE_Real temp;
   HYPRE_Real diag_value;
   HYPRE_Int pos_diag, neg_diag;
   HYPRE_Int start_row, end_row;
   HYPRE_Int row_length;
   HYPRE_Int *col_ind;
   HYPRE_Int j;
   HYPRE_Int i;

   /* estimate with the inf-norm of A - should be ok for SPD matrices */
   start_row = hypre_ParCSRMatrixFirstRowIndex(A);
   end_row = hypre_ParCSRMatrixLastRowIndex(A);
   max_norm = 0.0;
   pos_diag = neg_diag = 0;
   for ( i = start_row; i <= end_row; i++ )
   {
      HYPRE_ParCSRMatrixGetRow((HYPRE_ParCSRMatrix) A, i, &row_length, &col_ind, &col_val);
      row_sum = 0.0;
      diag_value = 0.0; /* reset per row; stays 0 for an empty row */
      for (j = 0; j < row_length; j++)
      {
         /* the first stored entry is taken as the diagonal -- assumes
            hypre's diagonal-first storage convention for the diag block */
         if (j==0) diag_value = fabs(col_val[j]);
         row_sum += fabs(col_val[j]);
         if ( col_ind[j] == i && col_val[j] > 0.0 ) pos_diag++;
         if ( col_ind[j] == i && col_val[j] < 0.0 ) neg_diag++;
      }
      if (scale)
      {
         if (diag_value != 0.0)
            row_sum = row_sum/diag_value;
      }
      if ( row_sum > max_norm ) max_norm = row_sum;
      HYPRE_ParCSRMatrixRestoreRow((HYPRE_ParCSRMatrix) A, i, &row_length, &col_ind, &col_val);
   }
   /* get max across procs */
   hypre_MPI_Allreduce(&max_norm, &temp, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A));
   max_norm = temp;
   /* from Charles */
   if ( pos_diag == 0 && neg_diag > 0 ) max_norm = - max_norm;
   /* eig estimates */
   e_max = max_norm;
   /* return */
   *max_eig = e_max;
   return hypre_error_flag;
}
/******************************************************************************
use CG to get the eigenvalue estimate
scale means get eig est of (D^{-1/2} A D^{-1/2}
******************************************************************************/
/*
 * Estimate the extreme eigenvalues of A (or D^{-1/2} A D^{-1/2} when
 * scale != 0) by running max_iter steps of a CG-style recurrence from a
 * random residual, accumulating the Lanczos tridiagonal matrix, and
 * diagonalizing it with the EISPACK-derived hypre_LINPACKcgtql1 routine.
 * Outputs: *max_eig / *min_eig = largest / smallest Ritz values.
 */
HYPRE_Int hypre_ParCSRMaxEigEstimateCG(hypre_ParCSRMatrix *A, /* matrix to relax with */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Int max_iter,
HYPRE_Real *max_eig,
HYPRE_Real *min_eig)
{
HYPRE_Int i, j, err;
hypre_ParVector *p;
hypre_ParVector *s;
hypre_ParVector *r;
hypre_ParVector *ds;
hypre_ParVector *u;
HYPRE_Real *tridiag = NULL;
HYPRE_Real *trioffd = NULL;
HYPRE_Real lambda_max ;
HYPRE_Real beta, gamma = 0.0, alpha, sdotp, gamma_old, alphainv;
HYPRE_Real diag;
HYPRE_Real lambda_min;
HYPRE_Real *s_data, *p_data, *ds_data, *u_data;
HYPRE_Int local_size = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
/* check the size of A - don't iterate more than the size */
HYPRE_Int size = hypre_ParCSRMatrixGlobalNumRows(A);
if (size < max_iter)
max_iter = size;
/* create some temp vectors: p, s, r , ds, u*/
r = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(r);
hypre_ParVectorSetPartitioningOwner(r,0);
p = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(p);
hypre_ParVectorSetPartitioningOwner(p,0);
s = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(s);
hypre_ParVectorSetPartitioningOwner(s,0);
ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(ds);
hypre_ParVectorSetPartitioningOwner(ds,0);
u = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(u);
hypre_ParVectorSetPartitioningOwner(u,0);
/* point to local data */
s_data = hypre_VectorData(hypre_ParVectorLocalVector(s));
p_data = hypre_VectorData(hypre_ParVectorLocalVector(p));
ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
/* make room for tri-diag matrix */
tridiag = hypre_CTAlloc(HYPRE_Real, max_iter+1);
trioffd = hypre_CTAlloc(HYPRE_Real, max_iter+1);
for (i=0; i < max_iter + 1; i++)
{
tridiag[i] = 0;
trioffd[i] = 0;
}
/* set residual to random */
hypre_ParVectorSetRandomValues(r,1);
if (scale)
{
/* ds = D^{-1/2}; assumes the diagonal entry is stored first in each
   local row of the diag block (hypre convention) */
for (i = 0; i < local_size; i++)
{
diag = A_diag_data[A_diag_i[i]];
ds_data[i] = 1/sqrt(diag);
}
}
else
{
/* set ds to 1 */
hypre_ParVectorSetConstantValues(ds,1.0);
}
/* gamma = <r,Cr> */
/* NOTE(review): p is still zero here (only Initialize'd above), so this
   initial gamma is 0 and is immediately recomputed from <r,s> inside the
   loop -- looks redundant, but verify before changing */
gamma = hypre_ParVectorInnerProd(r,p);
/* for the initial filling of the tridiag matrix */
beta = 1.0;
i = 0;
/* CG recurrence; each step appends one row/off-diagonal entry of the
   Lanczos tridiagonal matrix built from alpha and beta */
while (i < max_iter)
{
/* s = C*r */
/* TO DO: C = diag scale */
hypre_ParVectorCopy(r, s);
/*gamma = <r,Cr> */
gamma_old = gamma;
gamma = hypre_ParVectorInnerProd(r,s);
if (i==0)
{
beta = 1.0;
/* p_0 = C*r */
hypre_ParVectorCopy(s, p);
}
else
{
/* beta = gamma / gamma_old */
beta = gamma / gamma_old;
/* p = s + beta p */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j=0; j < local_size; j++)
{
p_data[j] = s_data[j] + beta*p_data[j];
}
}
if (scale)
{
/* s = D^{-1/2}A*D^{-1/2}*p */
for (j = 0; j < local_size; j++)
{
u_data[j] = ds_data[j] * p_data[j];
}
hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, s);
for (j = 0; j < local_size; j++)
{
s_data[j] = ds_data[j] * s_data[j];
}
}
else
{
/* s = A*p */
hypre_ParCSRMatrixMatvec(1.0, A, p, 0.0, s);
}
/* <s,p> */
sdotp = hypre_ParVectorInnerProd(s,p);
/* alpha = gamma / <s,p> */
alpha = gamma/sdotp;
/* get tridiagonal matrix */
alphainv = 1.0/alpha;
tridiag[i+1] = alphainv;
tridiag[i] *= beta;
tridiag[i] += alphainv;
trioffd[i+1] = alphainv;
trioffd[i] *= sqrt(beta);
/* x = x + alpha*p */
/* don't need */
/* r = r - alpha*s */
hypre_ParVectorAxpy( -alpha, s, r);
i++;
}
/* eispack routine - eigenvalues return in tridiag and ordered*/
hypre_LINPACKcgtql1(&i,tridiag,trioffd,&err);
lambda_max = tridiag[i-1];
lambda_min = tridiag[0];
/* hypre_printf("linpack max eig est = %g\n", lambda_max);*/
/* hypre_printf("linpack min eig est = %g\n", lambda_min);*/
hypre_TFree(tridiag);
hypre_TFree(trioffd);
hypre_ParVectorDestroy(r);
hypre_ParVectorDestroy(s);
hypre_ParVectorDestroy(p);
hypre_ParVectorDestroy(ds);
hypre_ParVectorDestroy(u);
/* return */
*max_eig = lambda_max;
*min_eig = lambda_min;
return hypre_error_flag;
}
/******************************************************************************
Chebyshev relaxation
Can specify order 1-4 (this is the order of the resid polynomial)- here we
explicitly code the coefficients (instead of
iteratively determining)
variant 0: standard chebyshev
this is rlx 11 if scale = 0, and 16 if scale == 1
variant 1: modified cheby: T(t)* f(t) where f(t) = (1-b/t)
this is rlx 15 if scale = 0, and 17 if scale == 1
ratio indicates the percentage of the whole spectrum to use (so .5
means half, and .1 means 10percent)
*******************************************************************************/
HYPRE_Int hypre_ParCSRRelax_Cheby(hypre_ParCSRMatrix *A, /* matrix to relax with */
hypre_ParVector *f, /* right-hand side */
HYPRE_Real max_eig,
HYPRE_Real min_eig,
HYPRE_Real fraction,
HYPRE_Int order, /* polynomial order */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Int variant,
hypre_ParVector *u, /* initial/updated approximation */
hypre_ParVector *v /* temporary vector */,
hypre_ParVector *r /*another temp vector */ )
{
/* Chebyshev polynomial relaxation: updates u <- u + s(A)(f - A u), where the
 * coefficients of s (one degree lower than the residual polynomial) are
 * hard-coded below for orders 1-4 from the eigenvalue bounds
 * [lower_bound, upper_bound].  With scale != 0, the iteration is applied to
 * the diagonally scaled operator D^{-1/2} A D^{-1/2}.  Returns
 * hypre_error_flag. */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
HYPRE_Real *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));
HYPRE_Real theta, delta;
HYPRE_Real den;
HYPRE_Real upper_bound, lower_bound;
HYPRE_Int i, j;
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Real coefs[5];
HYPRE_Real mult;
HYPRE_Real *orig_u;
HYPRE_Real tmp_d;
HYPRE_Int cheby_order;
HYPRE_Real *ds_data, *tmp_data;
HYPRE_Real diag;
hypre_ParVector *ds;
hypre_ParVector *tmp_vec;
/* u = u + p(A)r */
/* only orders 1-4 are implemented; clamp out-of-range requests */
if (order > 4)
order = 4;
if (order < 1)
order = 1;
/* we are using the order of p(A) */
cheby_order = order -1;
/* make sure we are large enough - Adams et al. 2003 */
upper_bound = max_eig * 1.1;
/* lower_bound = max_eig/fraction; */
/* fraction selects how much of [min_eig, upper_bound] to target */
lower_bound = (upper_bound - min_eig)* fraction + min_eig;
/* theta and delta */
/* theta = center, delta = half-width of the target interval */
theta = (upper_bound + lower_bound)/2;
delta = (upper_bound - lower_bound)/2;
if (variant == 1 )
{
/* modified chebyshev: T(t)*f(t), f(t) = (1 - b/t) */
switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0 - so order is
one less that resid poly: r(t) = 1 - t*s(t) */
{
case 0:
coefs[0] = 1.0/theta;
break;
case 1: /* (del - t + 2*th)/(th^2 + del*th) */
den = (theta*theta + delta*theta);
coefs[0] = (delta + 2*theta)/den;
coefs[1] = -1.0/den;
break;
case 2: /* (4*del*th - del^2 - t*(2*del + 6*th) + 2*t^2 + 6*th^2)/(2*del*th^2 - del^2*th - del^3 + 2*th^3)*/
den = 2*delta*theta*theta - delta*delta*theta - pow(delta,3) + 2*pow(theta,3);
coefs[0] = (4*delta*theta - pow(delta,2) + 6*pow(theta,2))/den;
coefs[1] = -(2*delta + 6*theta)/den;
coefs[2] = 2/den;
break;
case 3: /* -(6*del^2*th - 12*del*th^2 - t^2*(4*del + 16*th) + t*(12*del*th - 3*del^2 + 24*th^2) + 3*del^3 + 4*t^3 - 16*th^3)/(4*del*th^3 - 3*del^2*th^2 - 3*del^3*th + 4*th^4)*/
den = - (4*delta*pow(theta,3) - 3*pow(delta,2)*pow(theta,2) - 3*pow(delta,3)*theta + 4*pow(theta,4) );
coefs[0] = (6*pow(delta,2)*theta - 12*delta*pow(theta,2) + 3*pow(delta,3) - 16*pow(theta,3) )/den;
coefs[1] = (12*delta*theta - 3*pow(delta,2) + 24*pow(theta,2))/den;
coefs[2] = -( 4*delta + 16*theta)/den;
coefs[3] = 4/den;
break;
}
}
else /* standard chebyshev */
{
switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0 - so order is
one less thatn resid poly: r(t) = 1 - t*s(t) */
{
case 0:
coefs[0] = 1.0/theta;
break;
case 1: /* ( 2*t - 4*th)/(del^2 - 2*th^2) */
den = delta*delta - 2*theta*theta;
coefs[0] = -4*theta/den;
coefs[1] = 2/den;
break;
case 2: /* (3*del^2 - 4*t^2 + 12*t*th - 12*th^2)/(3*del^2*th - 4*th^3)*/
den = 3*(delta*delta)*theta - 4*(theta*theta*theta);
coefs[0] = (3*delta*delta - 12 *theta*theta)/den;
coefs[1] = 12*theta/den;
coefs[2] = -4/den;
break;
case 3: /*(t*(8*del^2 - 48*th^2) - 16*del^2*th + 32*t^2*th - 8*t^3 + 32*th^3)/(del^4 - 8*del^2*th^2 + 8*th^4)*/
den = pow(delta,4) - 8*delta*delta*theta*theta + 8*pow(theta,4);
coefs[0] = (32*pow(theta,3)- 16*delta*delta*theta)/den;
coefs[1] = (8*delta*delta - 48*theta*theta)/den;
coefs[2] = 32*theta/den;
coefs[3] = -8/den;
break;
}
}
/* scratch copy of the incoming u; the polynomial is accumulated in u_data
 * and added back onto orig_u at the end */
orig_u = hypre_CTAlloc(HYPRE_Real, num_rows);
if (!scale)
{
/* get residual: r = f - A*u */
hypre_ParVectorCopy(f, r);
hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);
/* Horner-style evaluation: start with the highest-order coefficient */
for ( i = 0; i < num_rows; i++ )
{
orig_u[i] = u_data[i];
u_data[i] = r_data[i] * coefs[cheby_order];
}
/* each pass computes u <- coefs[i]*r + A*u */
for (i = cheby_order - 1; i >= 0; i-- )
{
hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);
mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
u_data[j] = mult * r_data[j] + v_data[j];
}
}
/* add the correction onto the saved original u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for ( i = 0; i < num_rows; i++ )
{
u_data[i] = orig_u[i] + u_data[i];
}
}
else /* scaling! */
{
/*grab 1/sqrt(diagonal) */
/* NOTE: assumes the diagonal entry is first in each CSR row (standard
 * hypre layout) and positive, since it is used under sqrt */
ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(ds);
hypre_ParVectorSetPartitioningOwner(ds,0);
ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
tmp_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(tmp_vec);
hypre_ParVectorSetPartitioningOwner(tmp_vec,0);
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(tmp_vec));
/* get ds_data and get scaled residual: r = D^(-1/2)f -
* D^(-1/2)A*u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,diag) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_rows; j++)
{
diag = A_diag_data[A_diag_i[j]];
ds_data[j] = 1/sqrt(diag);
r_data[j] = ds_data[j] * f_data[j];
}
/* tmp_vec = -A*u; the scaled residual is assembled in the next loop */
hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
r_data[j] += ds_data[j] * tmp_data[j];
}
/* save original u, then start
the iteration by multiplying r by the cheby coef.*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
orig_u[j] = u_data[j]; /* orig, unscaled u */
u_data[j] = r_data[j] * coefs[cheby_order];
}
/* now do the other coefficients */
for (i = cheby_order - 1; i >= 0; i-- )
{
/* v = D^(-1/2)AD^(-1/2)u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
tmp_data[j] = ds_data[j] * u_data[j];
}
hypre_ParCSRMatrixMatvec(1.0, A, tmp_vec, 0.0, v);
/* u_new = coef*r + v*/
mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,tmp_d) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
tmp_d = ds_data[j]* v_data[j];
u_data[j] = mult * r_data[j] + tmp_d;
}
} /* end of cheby_order loop */
/* now we have to scale u_data before adding it to u_orig*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
u_data[j] = orig_u[j] + ds_data[j]*u_data[j];
}
hypre_ParVectorDestroy(ds);
hypre_ParVectorDestroy(tmp_vec);
}/* end of scaling code */
hypre_TFree(orig_u);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGRelax_FCFJacobi
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelax_FCFJacobi
 *
 * Weighted Jacobi (relax_type 0) applied in an F-C-F sweep pattern:
 * first the fine points, then the coarse points, then the fine points
 * again.  On the coarsest level cf_marker is NULL and a single plain
 * Jacobi sweep over all points is performed instead.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_BoomerAMGRelax_FCFJacobi( hypre_ParCSRMatrix *A,
                                          hypre_ParVector *f,
                                          HYPRE_Int *cf_marker,
                                          HYPRE_Real relax_weight,
                                          hypre_ParVector *u,
                                          hypre_ParVector *Vtemp)
{
   hypre_ParVector *Ztemp = NULL;     /* no auxiliary vector needed for Jacobi */
   const HYPRE_Int relax_type = 0;    /* plain weighted Jacobi */
   HYPRE_Int sweep;
   HYPRE_Int sweep_points[3];

   /* the F-C-F ordering of the three sweeps */
   sweep_points[0] = -1;  /* F */
   sweep_points[1] =  1;  /* C */
   sweep_points[2] = -1;  /* F */

   if (cf_marker == NULL)
   {
      /* coarsest level: no CF splitting available, so do one sweep of
         regular Jacobi over every point */
      hypre_BoomerAMGRelax(A, f, cf_marker, relax_type, 0,
                           relax_weight, 0.0, NULL, u, Vtemp, Ztemp);
   }
   else
   {
      for (sweep = 0; sweep < 3; sweep++)
      {
         hypre_BoomerAMGRelax(A, f, cf_marker, relax_type, sweep_points[sweep],
                              relax_weight, 0.0, NULL, u, Vtemp, Ztemp);
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* CG Smoother -
*
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * CG Smoother -
 *
 * Applies an existing PCG solver as a smoother: caps it at num_its
 * iterations and runs it on A u = f, updating u in place.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRRelax_CG( HYPRE_Solver solver,
                                hypre_ParCSRMatrix *A,
                                hypre_ParVector *f,
                                hypre_ParVector *u,
                                HYPRE_Int num_its)
{
   /* limit the inner solve to num_its iterations, then apply it */
   HYPRE_PCGSetMaxIter(solver, num_its);
   HYPRE_ParCSRPCGSolve(solver,
                        (HYPRE_ParCSRMatrix)A,
                        (HYPRE_ParVector)f,
                        (HYPRE_ParVector)u);

#if 0
   /* debugging aid (disabled): report inner-CG convergence on rank 0 */
   {
      HYPRE_Int  myid;
      HYPRE_Int  num_iterations;
      HYPRE_Real final_res_norm;

      hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid);
      HYPRE_PCGGetNumIterations(solver, &num_iterations);
      HYPRE_PCGGetFinalRelativeResidualNorm(solver, &final_res_norm);
      if (myid == 0)
      {
         hypre_printf(" -----CG PCG Iterations = %d\n", num_iterations);
         hypre_printf(" -----CG PCG Final Relative Residual Norm = %e\n", final_res_norm);
      }
   }
#endif

   return hypre_error_flag;
}
/* tql1.f --
this is the eispack translation - from Barry Smith in Petsc
Note that this routine always uses real numbers (not complex) even
if the underlying matrix is Hermitian. This is because the Lanczos
process applied to Hermitian matrices always produces a real,
symmetric tridiagonal matrix.
*/
HYPRE_Real hypre_LINPACKcgpthy(HYPRE_Real*,HYPRE_Real*);
/* Machine translation (f2c style) of EISPACK TQL1: eigenvalues of a real
 * symmetric tridiagonal matrix via the QL method with implicit shifts.
 * The goto-based control flow mirrors the original Fortran exactly and
 * should not be restructured. */
HYPRE_Int hypre_LINPACKcgtql1(HYPRE_Int *n,HYPRE_Real *d,HYPRE_Real *e,HYPRE_Int *ierr)
{
/* System generated locals */
HYPRE_Int i__1,i__2;
HYPRE_Real d__1,d__2,c_b10 = 1.0;
/* Local variables */
HYPRE_Real c,f,g,h;
HYPRE_Int i,j,l,m;
HYPRE_Real p,r,s,c2,c3 = 0.0;
HYPRE_Int l1,l2;
HYPRE_Real s2 = 0.0;
HYPRE_Int ii;
HYPRE_Real dl1,el1;
HYPRE_Int mml;
HYPRE_Real tst1,tst2;
/* THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL1, */
/* NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND */
/* WILKINSON. */
/* HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). */
/* THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC */
/* TRIDIAGONAL MATRIX BY THE QL METHOD. */
/* ON INPUT */
/* N IS THE ORDER OF THE MATRIX. */
/* D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. */
/* E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX */
/* IN ITS LAST N-1 POSITIONS. E(1) IS ARBITRARY. */
/* ON OUTPUT */
/* D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN */
/* ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND */
/* ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE */
/* THE SMALLEST EIGENVALUES. */
/* E HAS BEEN DESTROYED. */
/* IERR IS SET TO */
/* ZERO FOR NORMAL RETURN, */
/* J IF THE J-TH EIGENVALUE HAS NOT BEEN */
/* DETERMINED AFTER 30 ITERATIONS. */
/* CALLS CGPTHY FOR DSQRT(A*A + B*B) . */
/* QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, */
/* MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY
*/
/* THIS VERSION DATED AUGUST 1983. */
/* ------------------------------------------------------------------
*/
HYPRE_Real ds;
/* shift the array pointers back by one so d[1..n] and e[1..n] match the
 * 1-based Fortran indexing used throughout */
--e;
--d;
*ierr = 0;
if (*n == 1) {
goto L1001;
}
/* move the subdiagonal entries down one slot: e[1..n-1] hold them */
i__1 = *n;
for (i = 2; i <= i__1; ++i) {
e[i - 1] = e[i];
}
f = 0.;
tst1 = 0.;
e[*n] = 0.;
/* main loop: deflate one eigenvalue per pass over l */
i__1 = *n;
for (l = 1; l <= i__1; ++l) {
j = 0;
h = (d__1 = d[l],fabs(d__1)) + (d__2 = e[l],fabs(d__2));
if (tst1 < h) {
tst1 = h;
}
/* .......... LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... */
i__2 = *n;
for (m = l; m <= i__2; ++m) {
tst2 = tst1 + (d__1 = e[m],fabs(d__1));
if (tst2 == tst1) {
goto L120;
}
/* .......... E(N) IS ALWAYS ZERO,SO THERE IS NO EXIT */
/* THROUGH THE BOTTOM OF THE LOOP .......... */
}
L120:
if (m == l) {
goto L210;
}
L130:
/* give up on this eigenvalue after 30 QL iterations */
if (j == 30) {
goto L1000;
}
++j;
/* .......... FORM SHIFT .......... */
l1 = l + 1;
l2 = l1 + 1;
g = d[l];
p = (d[l1] - g) / (e[l] * 2.);
r = hypre_LINPACKcgpthy(&p,&c_b10);
/* ds carries sign(p); pthy(p,1) = sqrt(p^2+1) computed overflow-safely */
ds = 1.0; if (p < 0.0) ds = -1.0;
d[l] = e[l] / (p + ds*r);
d[l1] = e[l] * (p + ds*r);
dl1 = d[l1];
h = g - d[l];
if (l2 > *n) {
goto L145;
}
i__2 = *n;
for (i = l2; i <= i__2; ++i) {
d[i] -= h;
}
L145:
f += h;
/* .......... QL TRANSFORMATION .......... */
p = d[m];
c = 1.;
c2 = c;
el1 = e[l1];
s = 0.;
mml = m - l;
/* .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... */
i__2 = mml;
for (ii = 1; ii <= i__2; ++ii) {
c3 = c2;
c2 = c;
s2 = s;
i = m - ii;
g = c * e[i];
h = c * p;
r = hypre_LINPACKcgpthy(&p,&e[i]);
e[i + 1] = s * r;
s = e[i] / r;
c = p / r;
p = c * d[i] - s * g;
d[i + 1] = h + s * (c * g + s * d[i]);
}
p = -s * s2 * c3 * el1 * e[l] / dl1;
e[l] = s * p;
d[l] = c * p;
tst2 = tst1 + (d__1 = e[l],fabs(d__1));
if (tst2 > tst1) {
goto L130;
}
L210:
p = d[l] + f;
/* .......... ORDER EIGENVALUES .......... */
/* insertion sort of the newly converged eigenvalue into d[1..l] */
if (l == 1) {
goto L250;
}
/* .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... */
i__2 = l;
for (ii = 2; ii <= i__2; ++ii) {
i = l + 2 - ii;
if (p >= d[i - 1]) {
goto L270;
}
d[i] = d[i - 1];
}
L250:
i = 1;
L270:
d[i] = p;
}
goto L1001;
/* .......... SET ERROR -- NO CONVERGENCE TO AN */
/* EIGENVALUE AFTER 30 ITERATIONS .......... */
L1000:
*ierr = l;
L1001:
return 0;
} /* cgtql1_ */
/* FINDS DSQRT(A**2+B**2) WITHOUT OVERFLOW OR DESTRUCTIVE UNDERFLOW
 * (EISPACK pythag): iteratively scales the larger magnitude by a factor
 * built from the squared ratio of the two, terminating when the ratio
 * underflows the update (t == 4 in working precision). */
HYPRE_Real hypre_LINPACKcgpthy(HYPRE_Real *a,HYPRE_Real *b)
{
   HYPRE_Real abs_a, abs_b;
   HYPRE_Real p;       /* running result: starts at max(|a|,|b|) */
   HYPRE_Real r;       /* squared ratio (min/max)^2, shrinks each pass */
   HYPRE_Real t, s, u;

   abs_a = fabs(*a);
   abs_b = fabs(*b);
   p = hypre_max(abs_a, abs_b);
   if (!p)
   {
      return p;   /* both inputs are zero */
   }

   abs_a = fabs(*a);
   abs_b = fabs(*b);
   t = hypre_min(abs_a, abs_b) / p;
   r = t * t;

   for (;;)
   {
      t = r + 4.;
      if (t == 4.)
      {
         break;   /* r has become negligible; p holds sqrt(a^2+b^2) */
      }
      s = r / t;
      u = s * 2. + 1.;
      p = u * p;
      t = s / u;
      r = t * t * r;
   }

   return p;
} /* cgpthy_ */
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax_L1_Jacobi (same as the one in AMS, but this allows CF)
u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1
*--------------------------------------------------------------------------*/
/* L1-scaled Jacobi sweep: u += relax_weight * D^{-1} (f - A u), where
 * D_ii = l1_norms[i] (supplied by the caller).  relax_points selects all
 * points (0) or only points whose cf_marker matches (C=1 / F=-1).
 * Off-processor entries of u are fetched via the matrix's comm package. */
HYPRE_Int hypre_ParCSRRelax_L1_Jacobi( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
HYPRE_Real *Vext_data = NULL; /* received off-processor u values */
HYPRE_Real *v_buf_data;       /* send buffer of local u values */
HYPRE_Int i, j;
HYPRE_Int ii, jj;
HYPRE_Int num_sends;
HYPRE_Int index, start;
HYPRE_Int num_procs, my_id ;
HYPRE_Real zero = 0.0;
HYPRE_Real res;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
/* start the non-blocking exchange of boundary u values; the local copy
 * below overlaps with the communication */
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
/* wait for the boundary values before the relaxation loops use Vext_data */
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_points == 0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
/* res = (f - A * u_old)_i, using the Vtemp copy so the sweep
 * is a true (simultaneous) Jacobi update */
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (relax_weight*res)/l1_norms[i];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (relax_weight * res)/l1_norms[i];
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data);
hypre_TFree(v_buf_data);
}
return 0;
}
|
libm-nvptx.c | //===--------- libm/libm-nvptx.c ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include <math.h>
#include <stddef.h>
#include <limits.h>
#include "libm-nvptx.h"
#pragma omp declare target
#if 0
#define __FAST_OR_SLOW(fast, slow) fast
#else
#define __FAST_OR_SLOW(fast, slow) slow
#endif
// BEGIN FLOAT
// Single-precision math functions for the NVPTX device: each is a thin
// forwarder to the corresponding CUDA libdevice intrinsic (__nv_*).
// __FAST_OR_SLOW selects between the fast approximate and the accurate
// libdevice variant (currently the accurate one; see the macro above).
float acosf(float __a) { return __nv_acosf(__a); }
float acoshf(float __a) { return __nv_acoshf(__a); }
float asinf(float __a) { return __nv_asinf(__a); }
float asinhf(float __a) { return __nv_asinhf(__a); }
float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
float atanf(float __a) { return __nv_atanf(__a); }
float atanhf(float __a) { return __nv_atanhf(__a); }
float cbrtf(float __a) { return __nv_cbrtf(__a); }
float ceilf(float __a) { return __nv_ceilf(__a); }
float copysignf(float __a, float __b) {
return __nv_copysignf(__a, __b);
}
float cosf(float __a) {
return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
}
float coshf(float __a) { return __nv_coshf(__a); }
float cospif(float __a) { return __nv_cospif(__a); }
float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
float erfcf(float __a) { return __nv_erfcf(__a); }
float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
float erfcxf(float __a) { return __nv_erfcxf(__a); }
float erff(float __a) { return __nv_erff(__a); }
float erfinvf(float __a) { return __nv_erfinvf(__a); }
float exp10f(float __a) { return __nv_exp10f(__a); }
float exp2f(float __a) { return __nv_exp2f(__a); }
float expf(float __a) { return __nv_expf(__a); }
float expm1f(float __a) { return __nv_expm1f(__a); }
float fabsf(float __a) { return __nv_fabsf(__a); }
float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
float fdividef(float __a, float __b) {
/* both macros are 0 when undefined, so plain division is the default */
#if __FAST_MATH__ && !__CUDA_PREC_DIV
return __nv_fast_fdividef(__a, __b);
#else
return __a / __b;
#endif
}
float floorf(float __f) { return __nv_floorf(__f); }
float fmaf(float __a, float __b, float __c) {
return __nv_fmaf(__a, __b, __c);
}
float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
int ilogbf(float __a) { return __nv_ilogbf(__a); }
int __finitef(float __a) { return __nv_finitef(__a); }
int __isinff(float __a) { return __nv_isinff(__a); }
int __isnanf(float __a) { return __nv_isnanf(__a); }
float j0f(float __a) { return __nv_j0f(__a); }
float j1f(float __a) { return __nv_j1f(__a); }
float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
float lgammaf(float __a) { return __nv_lgammaf(__a); }
long long llrintf(float __a) { return __nv_llrintf(__a); }
long long llroundf(float __a) { return __nv_llroundf(__a); }
float log10f(float __a) { return __nv_log10f(__a); }
float log1pf(float __a) { return __nv_log1pf(__a); }
float log2f(float __a) {
return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
}
float logbf(float __a) { return __nv_logbf(__a); }
float logf(float __a) {
return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
}
long long __float2ll_rn(float __a) { return __nv_float2ll_rn(__a); }
/* lrintf/lroundf route through the 64-bit conversions only when long is
 * 64 bits (LP64); otherwise the 32-bit conversions are used */
#if defined(__LP64__)
long lrintf(float __a) { return __float2ll_rn(__a); }
long lroundf(float __a) { return llroundf(__a); }
#else
long lrintf(float __a) { return __float2int_rn(__a); }
long lroundf(float __a) { return roundf(__a); }
#endif
float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
// nanf - missing
float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
float nextafterf(float __a, float __b) {
return __nv_nextafterf(__a, __b);
}
float norm3df(float __a, float __b, float __c) {
return __nv_norm3df(__a, __b, __c);
}
float norm4df(float __a, float __b, float __c, float __d) {
return __nv_norm4df(__a, __b, __c, __d);
}
float normcdff(float __a) { return __nv_normcdff(__a); }
float normf(int __dim, const float *__t) {
return __nv_normf(__dim, __t);
}
float powf(float __a, float __b) { return __nv_powf(__a, __b); }
float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
float remainderf(float __a, float __b) {
return __nv_remainderf(__a, __b);
}
float remquof(float __a, float __b, int *__c) {
return __nv_remquof(__a, __b, __c);
}
float rhypotf(float __a, float __b) {
return __nv_rhypotf(__a, __b);
}
float rintf(float __a) { return __nv_rintf(__a); }
float rnorm3df(float __a, float __b, float __c) {
return __nv_rnorm3df(__a, __b, __c);
}
float rnorm4df(float __a, float __b, float __c, float __d) {
return __nv_rnorm4df(__a, __b, __c, __d);
}
float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
float rnormf(int __dim, const float *__t) {
return __nv_rnormf(__dim, __t);
}
float roundf(float __a) { return __nv_roundf(__a); }
float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
/* clamp out-of-int-range exponents before delegating to scalbnf.
 * NOTE(review): for __a == 0 the saturating branches take the negative
 * result (-HUGE_VALF / -0.f); this mirrors clang's CUDA wrappers but
 * differs from scalbn's usual zero-preserving behavior - confirm intended. */
float scalblnf(float __a, long __b) {
if (__b > INT_MAX)
return __a > 0 ? HUGE_VALF : -HUGE_VALF;
if (__b < INT_MIN)
return __a > 0 ? 0.f : -0.f;
return scalbnf(__a, (int)__b);
}
float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
int __signbitf(float __a) { return __nv_signbitf(__a); }
void sincosf(float __a, float *__sptr, float *__cptr) {
return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __sptr, __cptr);
}
void sincospif(float __a, float *__sptr, float *__cptr) {
return __nv_sincospif(__a, __sptr, __cptr);
}
float sinf(float __a) {
return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
}
float sinhf(float __a) { return __nv_sinhf(__a); }
float sinpif(float __a) { return __nv_sinpif(__a); }
float sqrtf(float __a) { return __nv_sqrtf(__a); }
float tanf(float __a) { return __nv_tanf(__a); }
float tanhf(float __a) { return __nv_tanhf(__a); }
float tgammaf(float __a) { return __nv_tgammaf(__a); }
float truncf(float __a) { return __nv_truncf(__a); }
float y0f(float __a) { return __nv_y0f(__a); }
float y1f(float __a) { return __nv_y1f(__a); }
float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
// BEGIN INTRINSICS
// CUDA-style device intrinsics: fast approximate transcendentals
// (__nv_fast_*) and explicitly-rounded arithmetic.  The _rd/_rn/_ru/_rz
// suffixes follow the CUDA rounding-mode naming convention (presumably
// toward -inf / nearest-even / +inf / zero - confirm against libdevice docs).
float __cosf(float __a) { return __nv_fast_cosf(__a); }
float __exp10f(float __a) { return __nv_fast_exp10f(__a); }
float __expf(float __a) { return __nv_fast_expf(__a); }
float __fadd_rd(float __a, float __b) {
return __nv_fadd_rd(__a, __b);
}
float __fadd_rn(float __a, float __b) {
return __nv_fadd_rn(__a, __b);
}
float __fadd_ru(float __a, float __b) {
return __nv_fadd_ru(__a, __b);
}
float __fadd_rz(float __a, float __b) {
return __nv_fadd_rz(__a, __b);
}
float __fdiv_rd(float __a, float __b) {
return __nv_fdiv_rd(__a, __b);
}
float __fdiv_rn(float __a, float __b) {
return __nv_fdiv_rn(__a, __b);
}
float __fdiv_ru(float __a, float __b) {
return __nv_fdiv_ru(__a, __b);
}
float __fdiv_rz(float __a, float __b) {
return __nv_fdiv_rz(__a, __b);
}
float __fdividef(float __a, float __b) {
return __nv_fast_fdividef(__a, __b);
}
float __fmaf_rd(float __a, float __b, float __c) {
return __nv_fmaf_rd(__a, __b, __c);
}
float __fmaf_rn(float __a, float __b, float __c) {
return __nv_fmaf_rn(__a, __b, __c);
}
float __fmaf_ru(float __a, float __b, float __c) {
return __nv_fmaf_ru(__a, __b, __c);
}
float __fmaf_rz(float __a, float __b, float __c) {
return __nv_fmaf_rz(__a, __b, __c);
}
float __fmul_rd(float __a, float __b) {
return __nv_fmul_rd(__a, __b);
}
float __fmul_rn(float __a, float __b) {
return __nv_fmul_rn(__a, __b);
}
float __fmul_ru(float __a, float __b) {
return __nv_fmul_ru(__a, __b);
}
float __fmul_rz(float __a, float __b) {
return __nv_fmul_rz(__a, __b);
}
float __frcp_rd(float __a) { return __nv_frcp_rd(__a); }
float __frcp_rn(float __a) { return __nv_frcp_rn(__a); }
float __frcp_ru(float __a) { return __nv_frcp_ru(__a); }
float __frcp_rz(float __a) { return __nv_frcp_rz(__a); }
float __fsqrt_rd(float __a) { return __nv_fsqrt_rd(__a); }
float __fsqrt_rn(float __a) { return __nv_fsqrt_rn(__a); }
float __fsqrt_ru(float __a) { return __nv_fsqrt_ru(__a); }
float __fsqrt_rz(float __a) { return __nv_fsqrt_rz(__a); }
float __fsub_rd(float __a, float __b) {
return __nv_fsub_rd(__a, __b);
}
float __fsub_rn(float __a, float __b) {
return __nv_fsub_rn(__a, __b);
}
float __fsub_ru(float __a, float __b) {
return __nv_fsub_ru(__a, __b);
}
float __fsub_rz(float __a, float __b) {
return __nv_fsub_rz(__a, __b);
}
float __log10f(float __a) { return __nv_fast_log10f(__a); }
float __log2f(float __a) { return __nv_fast_log2f(__a); }
float __logf(float __a) { return __nv_fast_logf(__a); }
float __powf(float __a, float __b) {
return __nv_fast_powf(__a, __b);
}
float __saturatef(float __a) { return __nv_saturatef(__a); }
void __sincosf(float __a, float *__sptr, float *__cptr) {
return __nv_fast_sincosf(__a, __sptr, __cptr);
}
float __sinf(float __a) { return __nv_fast_sinf(__a); }
float __tanf(float __a) { return __nv_fast_tanf(__a); }
// BEGIN DOUBLE
// Double-precision math functions: thin forwarders to the corresponding
// CUDA libdevice intrinsics (__nv_*), mirroring the float section above.
double acos(double __a) { return __nv_acos(__a); }
double acosh(double __a) { return __nv_acosh(__a); }
double asin(double __a) { return __nv_asin(__a); }
double asinh(double __a) { return __nv_asinh(__a); }
double atan(double __a) { return __nv_atan(__a); }
double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
double atanh(double __a) { return __nv_atanh(__a); }
double cbrt(double __a) { return __nv_cbrt(__a); }
double ceil(double __a) { return __nv_ceil(__a); }
double copysign(double __a, double __b) {
return __nv_copysign(__a, __b);
}
double cos(double __a) { return __nv_cos(__a); }
double cosh(double __a) { return __nv_cosh(__a); }
double cospi(double __a) { return __nv_cospi(__a); }
double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
double erf(double __a) { return __nv_erf(__a); }
double erfc(double __a) { return __nv_erfc(__a); }
double erfcinv(double __a) { return __nv_erfcinv(__a); }
double erfcx(double __a) { return __nv_erfcx(__a); }
double erfinv(double __a) { return __nv_erfinv(__a); }
double exp(double __a) { return __nv_exp(__a); }
double exp10(double __a) { return __nv_exp10(__a); }
double exp2(double __a) { return __nv_exp2(__a); }
double expm1(double __a) { return __nv_expm1(__a); }
double fabs(double __a) { return __nv_fabs(__a); }
double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
double floor(double __f) { return __nv_floor(__f); }
double fma(double __a, double __b, double __c) {
return __nv_fma(__a, __b, __c);
}
double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
int ilogb(double __a) { return __nv_ilogb(__a); }
int __finite(double __a) { return __nv_isfinited(__a); }
int __isinf(double __a) { return __nv_isinfd(__a); }
int __isnan(double __a) { return __nv_isnand(__a); }
double j0(double __a) { return __nv_j0(__a); }
double j1(double __a) { return __nv_j1(__a); }
double jn(int __n, double __a) { return __nv_jn(__n, __a); }
double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
double lgamma(double __a) { return __nv_lgamma(__a); }
long long llrint(double __a) { return __nv_llrint(__a); }
long long llround(double __a) { return __nv_llround(__a); }
double log(double __a) { return __nv_log(__a); }
double log10(double __a) { return __nv_log10(__a); }
double log1p(double __a) { return __nv_log1p(__a); }
double log2(double __a) { return __nv_log2(__a); }
double logb(double __a) { return __nv_logb(__a); }
/* as in the float section, long-returning conversions depend on whether
 * long is 64-bit (LP64) */
#if defined(__LP64__)
long lrint(double __a) { return llrint(__a); }
long lround(double __a) { return llround(__a); }
#else
long lrint(double __a) { return (long)rint(__a); }
long lround(double __a) { return round(__a); }
#endif
double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
// nan - missing
double nearbyint(double __a) { return __nv_nearbyint(__a); }
double nextafter(double __a, double __b) {
return __nv_nextafter(__a, __b);
}
double norm(int __dim, const double *__t) {
return __nv_norm(__dim, __t);
}
double norm3d(double __a, double __b, double __c) {
return __nv_norm3d(__a, __b, __c);
}
double norm4d(double __a, double __b, double __c, double __d) {
return __nv_norm4d(__a, __b, __c, __d);
}
double normcdf(double __a) { return __nv_normcdf(__a); }
double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
double pow(double __a, double __b) { return __nv_pow(__a, __b); }
double rcbrt(double __a) { return __nv_rcbrt(__a); }
double remainder(double __a, double __b) {
return __nv_remainder(__a, __b);
}
double remquo(double __a, double __b, int *__c) {
return __nv_remquo(__a, __b, __c);
}
double rhypot(double __a, double __b) {
return __nv_rhypot(__a, __b);
}
double rint(double __a) { return __nv_rint(__a); }
double rnorm(int __a, const double *__b) {
return __nv_rnorm(__a, __b);
}
double rnorm3d(double __a, double __b, double __c) {
return __nv_rnorm3d(__a, __b, __c);
}
double rnorm4d(double __a, double __b, double __c, double __d) {
return __nv_rnorm4d(__a, __b, __c, __d);
}
double round(double __a) { return __nv_round(__a); }
double rsqrt(double __a) { return __nv_rsqrt(__a); }
double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
/* clamp out-of-int-range exponents before delegating to scalbn.
 * NOTE(review): for __a == 0 the saturating branches take the negative
 * result, as in scalblnf above - confirm intended. */
double scalbln(double __a, long __b) {
if (__b > INT_MAX)
return __a > 0 ? HUGE_VAL : -HUGE_VAL;
if (__b < INT_MIN)
return __a > 0 ? 0.0 : -0.0;
return scalbn(__a, (int)__b);
}
int __signbit(double __a) { return __nv_signbitd(__a); }
double sin(double __a) { return __nv_sin(__a); }
void sincos(double __a, double *__sptr, double *__cptr) {
return __nv_sincos(__a, __sptr, __cptr);
}
void sincospi(double __a, double *__sptr, double *__cptr) {
return __nv_sincospi(__a, __sptr, __cptr);
}
double sinh(double __a) { return __nv_sinh(__a); }
double sinpi(double __a) { return __nv_sinpi(__a); }
double sqrt(double __a) { return __nv_sqrt(__a); }
double tan(double __a) { return __nv_tan(__a); }
double tanh(double __a) { return __nv_tanh(__a); }
double tgamma(double __a) { return __nv_tgamma(__a); }
double trunc(double __a) { return __nv_trunc(__a); }
double y0(double __a) { return __nv_y0(__a); }
double y1(double __a) { return __nv_y1(__a); }
double yn(int __a, double __b) { return __nv_yn(__a, __b); }
// BEGIN INTRINSICS
// Explicitly-rounded double-precision CUDA intrinsics. The suffix selects
// the IEEE rounding mode: _rd = toward -inf, _rn = to nearest even,
// _ru = toward +inf, _rz = toward zero.
double __dadd_rd(double __a, double __b) {
return __nv_dadd_rd(__a, __b);
}
double __dadd_rn(double __a, double __b) {
return __nv_dadd_rn(__a, __b);
}
double __dadd_ru(double __a, double __b) {
return __nv_dadd_ru(__a, __b);
}
double __dadd_rz(double __a, double __b) {
return __nv_dadd_rz(__a, __b);
}
double __ddiv_rd(double __a, double __b) {
return __nv_ddiv_rd(__a, __b);
}
double __ddiv_rn(double __a, double __b) {
return __nv_ddiv_rn(__a, __b);
}
double __ddiv_ru(double __a, double __b) {
return __nv_ddiv_ru(__a, __b);
}
double __ddiv_rz(double __a, double __b) {
return __nv_ddiv_rz(__a, __b);
}
double __dmul_rd(double __a, double __b) {
return __nv_dmul_rd(__a, __b);
}
double __dmul_rn(double __a, double __b) {
return __nv_dmul_rn(__a, __b);
}
double __dmul_ru(double __a, double __b) {
return __nv_dmul_ru(__a, __b);
}
double __dmul_rz(double __a, double __b) {
return __nv_dmul_rz(__a, __b);
}
// Reciprocal (1/x) with explicit rounding.
double __drcp_rd(double __a) { return __nv_drcp_rd(__a); }
double __drcp_rn(double __a) { return __nv_drcp_rn(__a); }
double __drcp_ru(double __a) { return __nv_drcp_ru(__a); }
double __drcp_rz(double __a) { return __nv_drcp_rz(__a); }
double __dsqrt_rd(double __a) { return __nv_dsqrt_rd(__a); }
double __dsqrt_rn(double __a) { return __nv_dsqrt_rn(__a); }
double __dsqrt_ru(double __a) { return __nv_dsqrt_ru(__a); }
double __dsqrt_rz(double __a) { return __nv_dsqrt_rz(__a); }
double __dsub_rd(double __a, double __b) {
return __nv_dsub_rd(__a, __b);
}
double __dsub_rn(double __a, double __b) {
return __nv_dsub_rn(__a, __b);
}
double __dsub_ru(double __a, double __b) {
return __nv_dsub_ru(__a, __b);
}
double __dsub_rz(double __a, double __b) {
return __nv_dsub_rz(__a, __b);
}
// Fused multiply-add (__a * __b + __c, single rounding).
double __fma_rd(double __a, double __b, double __c) {
return __nv_fma_rd(__a, __b, __c);
}
double __fma_rn(double __a, double __b, double __c) {
return __nv_fma_rn(__a, __b, __c);
}
double __fma_ru(double __a, double __b, double __c) {
return __nv_fma_ru(__a, __b, __c);
}
double __fma_rz(double __a, double __b, double __c) {
return __nv_fma_rz(__a, __b, __c);
}
// END DOUBLE
#pragma omp end declare target
|
GB_binop__div_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__div_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__div_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__div_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint64)
// A*D function (colscale): GB (_AxD__div_uint64)
// D*A function (rowscale): GB (_DxB__div_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__div_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__div_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint64)
// C=scalar+B GB (_bind1st__div_uint64)
// C=scalar+B' GB (_bind1st_tran__div_uint64)
// C=A+scalar GB (_bind2nd__div_uint64)
// C=A'+scalar GB (_bind2nd_tran__div_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 64)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (x, y, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_UINT64 || GxB_NO_DIV_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense.  The loop itself lives in the
// shared template, specialized here by the GB_* macros for DIV on uint64.
void GB (_Cdense_ewise3_accum__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulation into C).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE,
// which tells the caller to fall back to the generic method.
GrB_Info GB (_Cdense_ewise3_noaccum__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, using the
// precomputed slicing of B's entries across B_ntasks/B_nthreads workers.
GrB_Info GB (_Cdense_accumB__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into the
// dense matrix C.
GrB_Info GB (_Cdense_accumb__div_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// generator artifact: unreachable, the scoped block above already returned
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, where the result pattern
// is the union of the patterns of A and B.  Workspaces declared here are
// allocated inside the template and released by GB_FREE_WORK.
GrB_Info GB (_AaddB__div_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is
// sparse or hypersparse; the result pattern is the intersection of A and B.
GrB_Info GB (_AemultB_08__div_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for DIV (the flipped case was rewritten
// as RDIV upstream), so only the unflipped template branch is compiled in.
GrB_Info GB (_AemultB_02__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; work is partitioned by the slicing of M.
GrB_Info GB (_AemultB_04__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__div_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x / Bx [p] for every entry present in B: apply the DIV binary
// operator with the scalar bound to the first argument.  Cx and Bx may
// alias, which is safe because each entry is read once then written once.
GrB_Info GB (_bind1st__div_uint64)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the type-erased arguments
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t pB ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pB = 0 ; pB < bnz ; pB++)
{
// operate only on entries present in the bitmap/full matrix B
if (GBB (Bb, pB))
{
uint64_t bij = GBX (Bx, pB, false) ;
Cx [pB] = GB_IDIV_UNSIGNED (x, bij, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] / y for every entry present in A: apply the DIV binary
// operator with the scalar bound to the second argument.  Cx and Ax may
// alias, which is safe because each entry is read once then written once.
GrB_Info GB (_bind2nd__div_uint64)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the type-erased arguments
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
int64_t pA ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pA = 0 ; pA < anz ; pA++)
{
// operate only on entries present in the bitmap/full matrix A
if (GBB (Ab, pA))
{
uint64_t aij = GBX (Ax, pA, false) ;
Cx [pA] = GB_IDIV_UNSIGNED (aij, y, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 64) ; \
}
// C = op (x, A'): transpose A while applying cij = x / aij via the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind1st_tran__div_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 64) ; \
}
// C = op (A', y): transpose A while applying cij = aij / y via the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind2nd_tran__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_set_unset_lock_hinted.c | /******************************************************************************\
Extended version of omp_set_unset_lock.c for testing hinted locks.
Check to make sure OpenMP locks guarantee mutual
exclusion for multiple threads.
\******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Critical section body: while holding the lock, stamp the shared buffer
   with this thread's id and then verify that no other thread's writes were
   interleaved.  Any mismatch clears *passed. */
void cscall(int id, int n[1000], int *passed, omp_lock_t *lock) {
  int idx;

  omp_set_lock(lock);
  for (idx = 0; idx < 1000; idx++)
    n[idx] = id;
  for (idx = 0; idx < 1000; idx++)
    if (n[idx] != id)
      *passed = 0;
  omp_unset_lock(lock);
}
/* Run the mutual-exclusion check with a lock initialized via the Intel
   kmp_init_lock_hinted() extension.  Returns 0 on success, 1 on failure. */
int hinted_lock(kmp_lock_hint_t hint) {
  omp_lock_t lock;
  int n[1000];
  int passed = 1;

  kmp_init_lock_hinted(&lock, hint);
#pragma omp parallel shared(n, passed, lock)
  {
    /* Declared inside the region, so these are private per thread. */
    int id = omp_get_thread_num();
    int j;
    for (j = 1; j <= 10000; j++)
      cscall(id, n, &passed, &lock);
  }
  omp_destroy_lock(&lock);
  return passed ? 0 : 1;
}
/* Exercise every lock hint in turn and report the aggregate result. */
int main() {
  const kmp_lock_hint_t hints[] = {
      kmp_lock_hint_none,
      kmp_lock_hint_contended,
      kmp_lock_hint_uncontended,
      kmp_lock_hint_nonspeculative,
      kmp_lock_hint_speculative,
      /* This one will emit Warning on machines with no TSX. */
      kmp_lock_hint_adaptive,
  };
  int i;
  int ret = 0;

  for (i = 0; i < (int)(sizeof(hints) / sizeof(hints[0])); i++)
    ret += hinted_lock(hints[i]);
  if (ret) {
    printf(" Test %s failed\n", __FILE__);
    return 1;
  }
  printf(" Test %s passed\n", __FILE__);
  return 0;
}
|
veccopy.c | #include <stdio.h>
#include <omp.h>
/*
 * Offload test: copy b into a on the target device with a combined
 * "target parallel for" construct, then validate the result on the host.
 * Returns the number of mismatched elements (0 on success).
 *
 * Fixes vs. the previous version:
 *  - A combined `omp target parallel for` construct must be immediately
 *    followed by the associated for loop; wrapping the loop in a compound
 *    statement is non-conforming.  The braces were removed.
 *  - Typo in the diagnostic message ("varlue" -> "value").
 */
int main()
{
  int N = 100000;
  int a[N];
  int b[N];
  int i;

  /* a cleared, b holds the index values to be copied. */
  for (i=0; i<N; i++)
    a[i]=0;
  for (i=0; i<N; i++)
    b[i]=i;

#pragma omp target parallel for map(from: a[0:N]) map(to: b[0:N])
  for (int j = 0; j < N; j++)
    a[j] = b[j];

  /* Host-side validation of the device copy. */
  int rc = 0;
  for (i=0; i<N; i++)
    if (a[i] != b[i] ) {
      rc++;
      printf ("Wrong value: a[%d]=%d\n", i, a[i]);
    }
  if (!rc)
    printf("Success\n");
  return rc;
}
|
wand-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% W W AAA N N DDDD %
% W W A A NN N D D %
% W W W AAAAA N N N D D %
% WW WW A A N NN D D %
% W W A A N N DDDD %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickWand Wand View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define WandViewId "WandView"
/*
Typedef declarations.
*/
struct _WandView
{
size_t
id;  // unique view id from AcquireWandId()

char
name[MaxTextExtent],  // "WandView-<id>", used in log/exception messages
*description;  // label passed to SetImageProgress() during iteration

RectangleInfo
extent;  // region of the image this view iterates over

MagickWand
*wand;  // source wand; not released by DestroyWandView()

CacheView
*view;  // pixel cache view onto the wand's image

size_t
number_threads;  // rows in pixel_wands, one per worker thread

PixelWand
***pixel_wands;  // [thread][column] scanline of pixel wands

ExceptionInfo
*exception;  // per-view exception sink

MagickBooleanType
debug;  // log wand events when not MagickFalse

size_t
signature;  // WandSignature while the structure is valid
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneWandView() makes a copy of the specified wand view.
%
% The format of the CloneWandView method is:
%
% WandView *CloneWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  CloneWandView() makes a deep copy of the specified wand view: a fresh id
  and name, a cloned cache view, a cloned exception, and cloned per-thread
  pixel-wand rows.  The underlying wand is shared with the source view.

  Fixes vs. the previous version:
   - clone_view->pixel_wands was never allocated: the memset() left it
     NULL, so the clone loop below dereferenced a null pointer.  The
     per-thread table is now allocated (and zeroed) first.
   - clone_view->wand was never set, although other methods dereference
     view->wand->images; it now shares the source's wand.
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
WandView
*clone_view;

register ssize_t
i;

assert(wand_view != (WandView *) NULL);
assert(wand_view->signature == WandSignature);
if (wand_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
if (clone_view == (WandView *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
wand_view->name);
(void) memset(clone_view,0,sizeof(*clone_view));
clone_view->id=AcquireWandId();
(void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
WandViewId,(double) clone_view->id);
clone_view->description=ConstantString(wand_view->description);
clone_view->wand=wand_view->wand;
clone_view->view=CloneCacheView(wand_view->view);
clone_view->extent=wand_view->extent;
clone_view->number_threads=wand_view->number_threads;
clone_view->exception=AcquireExceptionInfo();
InheritException(clone_view->exception,wand_view->exception);
/*
  Allocate the per-thread pixel-wand table before cloning each row.
*/
clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
wand_view->number_threads,sizeof(*clone_view->pixel_wands));
if (clone_view->pixel_wands == (PixelWand ***) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
wand_view->name);
(void) memset(clone_view->pixel_wands,0,wand_view->number_threads*
sizeof(*clone_view->pixel_wands));
for (i=0; i < (ssize_t) wand_view->number_threads; i++)
clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
wand_view->pixel_wands[i],wand_view->extent.width);
clone_view->debug=wand_view->debug;
if (clone_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
clone_view->signature=WandSignature;
return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyWandView() deallocates memory associated with a wand view.
%
% The format of the DestroyWandView method is:
%
% WandView *DestroyWandView(WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  Release every per-thread row of pixel wands (skipping rows that were
  never allocated), then free the table itself.  Always returns NULL so
  callers can overwrite their pointer in one statement.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
const size_t number_wands,const size_t number_threads)
{
register ssize_t
i;

assert(pixel_wands != (PixelWand ***) NULL);
for (i=0; i < (ssize_t) number_threads; i++)
{
if (pixel_wands[i] == (PixelWand **) NULL)
continue;
pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
}
return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
/*
  DestroyWandView() deallocates memory associated with a wand view: the
  per-thread pixel wands, the cache view, and the exception, in that order.
  The signature is invalidated and the id recycled before the structure
  itself is freed.  Always returns NULL.
*/
WandExport WandView *DestroyWandView(WandView *wand_view)
{
assert(wand_view != (WandView *) NULL);
assert(wand_view->signature == WandSignature);
wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
wand_view->extent.width,wand_view->number_threads);
wand_view->view=DestroyCacheView(wand_view->view);
wand_view->exception=DestroyExceptionInfo(wand_view->exception);
wand_view->signature=(~WandSignature);
RelinquishWandId(wand_view->id);
return((WandView *) RelinquishMagickMemory(wand_view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferWandViewIterator() iterates over three wand views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination wand view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
% const WandView *duplex,WandView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferWandViewIterator method is:
%
% MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
% WandView *duplex,WandView *destination,
% DuplexTransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o duplex: the duplex wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate scanline-by-scanline over three views in parallel: for each row,
  load source and duplex pixels into per-thread pixel wands, invoke the
  user transfer callback, then write the (possibly modified) destination
  wands back and sync the destination cache.
  NOTE(review): only `source` is assert-checked; `duplex` and `destination`
  are dereferenced unconditionally — presumably callers guarantee non-NULL.
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
void *context)
{
ExceptionInfo
*exception;

Image
*destination_image,
*duplex_image,
*source_image;

MagickBooleanType
status;

MagickOffsetType
progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif

ssize_t
y;

assert(source != (WandView *) NULL);
assert(source->signature == WandSignature);
if (transfer == (DuplexTransferWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
duplex_image=duplex->wand->images;
destination_image=destination->wand->images;
// the destination must be writable pixel-by-pixel, hence DirectClass
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=(size_t) (source->extent.height-source->extent.y);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,destination_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();

MagickBooleanType
sync;

register const IndexPacket
*magick_restrict duplex_indexes,
*magick_restrict indexes;

register const PixelPacket
*magick_restrict duplex_pixels,
*magick_restrict pixels;

register IndexPacket
*magick_restrict destination_indexes;

register ssize_t
x;

register PixelPacket
*magick_restrict destination_pixels;

// once any row fails, remaining iterations become no-ops (OpenMP loops
// cannot break, so they continue instead)
if (status == MagickFalse)
continue;
// load the source scanline into this thread's pixel wands
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source->view);
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelBlack(indexes+x));
if (source_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetIndex(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
// load the duplex scanline the same way
duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
duplex->extent.width,1,duplex->exception);
if (duplex_pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
for (x=0; x < (ssize_t) duplex->extent.width; x++)
PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
if (duplex_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) duplex->extent.width; x++)
PixelSetBlackQuantum(duplex->pixel_wands[id][x],
GetPixelBlack(duplex_indexes+x));
if (duplex_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) duplex->extent.width; x++)
PixelSetIndex(duplex->pixel_wands[id][x],
GetPixelIndex(duplex_indexes+x));
// authentic (writable) destination scanline
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,exception);
if (destination_pixels == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
for (x=0; x < (ssize_t) destination->extent.width; x++)
PixelSetQuantumColor(destination->pixel_wands[id][x],
destination_pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->extent.width; x++)
PixelSetBlackQuantum(destination->pixel_wands[id][x],
GetPixelBlack(destination_indexes+x));
if (destination_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) destination->extent.width; x++)
PixelSetIndex(destination->pixel_wands[id][x],
GetPixelIndex(destination_indexes+x));
// user callback; it sees all three rows via the per-thread wands
if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
status=MagickFalse;
// write the destination wands back into the pixel cache
for (x=0; x < (ssize_t) destination->extent.width; x++)
PixelGetQuantumColor(destination->pixel_wands[id][x],
destination_pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->extent.width; x++)
SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
destination->pixel_wands[id][x]));
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
// NOTE(review): a destination sync failure inherits the exception from
// source->view; verify this shouldn't be destination->view instead.
InheritException(destination->exception,GetCacheViewException(
source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,source->description,progress,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a wand view.
%
% The format of the GetWandViewException method is:
%
% char *GetWandViewException(const WandView *wand_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o wand_view: the pixel wand_view.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *message;

  /*
    Report the severity and a localized reason/description string for any
    error recorded on the wand view.  The returned buffer is allocated here
    and owned by the caller.
  */
  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=wand_view->exception->severity;
  message=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,sizeof(*message));
  if (message == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *message='\0';
  if (wand_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(message,GetLocaleExceptionMessage(
      wand_view->exception->severity,wand_view->exception->reason),
      MaxTextExtent);
  if (wand_view->exception->description != (char *) NULL)
    {
      /* Append the description, parenthesized, after the reason. */
      (void) ConcatenateMagickString(message," (",MaxTextExtent);
      (void) ConcatenateMagickString(message,GetLocaleExceptionMessage(
        wand_view->exception->severity,wand_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(message,")",MaxTextExtent);
    }
  return(message);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewExtent() returns the wand view extent.
%
% The format of the GetWandViewExtent method is:
%
% RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  /*
    Return the view's bounding region (x, y, width, height) by value.
  */
  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewIterator() iterates over the wand view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const WandView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetWandViewIterator method is:
%
% MagickBooleanType GetWandViewIterator(WandView *source,
% GetWandViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif
  ssize_t
    y;
  /*
    Walk the view scanline-by-scanline (in parallel when OpenMP is enabled),
    populate this thread's pixel wands from the source pixels, and invoke the
    user "get" callback per row.  Pixels are read through a *virtual* cache
    view, so any changes the callback makes are never synced back.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's pixel-wand row */
    register const IndexPacket
      *indexes;
    register const PixelPacket
      *pixels;
    register ssize_t
      x;
    /* A failure in any row skips the remaining rows (no break allowed
       inside an OpenMP loop). */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Copy the scanline into this thread's pixel wands. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    /* CMYK images keep the black channel in the index queue. */
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    /* PseudoClass images also expose the colormap index. */
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewPixels() returns the wand view pixel_wands.
%
% The format of the GetWandViewPixels method is:
%
% PixelWand *GetWandViewPixels(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  const int
    id = GetOpenMPThreadId();

  /*
    Return the calling thread's row of pixel wands for this view.
  */
  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->pixel_wands[id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewWand() returns the magick wand associated with the wand view.
%
% The format of the GetWandViewWand method is:
%
% MagickWand *GetWandViewWand(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  /*
    Return the magick wand this view was created from.
  */
  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsWandView() returns MagickTrue if the parameter is verified as a wand
% view object.
%
% The format of the IsWandView method is:
%
% MagickBooleanType IsWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  size_t
    id_length;

  /*
    Verify the argument is a live wand view: non-NULL, carrying the wand
    signature, and whose name begins with the WandViewId prefix.
  */
  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != WandSignature)
    return(MagickFalse);
  id_length=strlen(WandViewId);
  if (LocaleNCompare(wand_view->name,WandViewId,id_length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandView() returns a wand view required for all other methods in the
% Wand View API.
%
% The format of the NewWandView method is:
%
% WandView *NewWandView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;
  register ssize_t
    i;
  /*
    Allocate one array of number_wands pixel wands per thread.  Returns NULL
    on allocation failure; a partially-built set is handed to
    DestroyPixelsThreadSet() (defined elsewhere in this file) for cleanup
    before returning — presumably it releases the wands and returns NULL.
  */
  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* Zero all slots so a partial failure can be cleaned up safely. */
  (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}
WandExport WandView *NewWandView(MagickWand *wand)
{
  WandView
    *wand_view;
  /*
    Allocate and initialize a wand view spanning the wand's entire first
    image (columns x rows at offset 0,0), with one row of pixel wands per
    OpenMP thread.  Throws a fatal wand exception on allocation failure.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  wand_view->wand=wand;  /* must be set before the cache view is acquired */
  wand_view->exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandViewExtent() returns a wand view required for all other methods
% in the Wand View API.
%
% The format of the NewWandViewExtent method is:
%
% WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of an extent of
% pixel_wands view.
%
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  WandView
    *wand_view;

  /*
    Allocate and initialize a wand view restricted to the given region of
    the wand's first image.  Throws a fatal wand exception on allocation
    failure.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  wand_view->exception=AcquireExceptionInfo();
  /*
    Bug fix: the wand member must be assigned BEFORE the cache view is
    acquired — the view is opened on wand_view->wand->images, which was a
    NULL dereference when the assignment came after (the structure is
    zeroed by the memset above).  Matches the ordering in NewWandView().
  */
  wand_view->wand=wand;
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewDescription() associates a description with an image view.
%
% The format of the SetWandViewDescription method is:
%
% void SetWandViewDescription(WandView *image_view,const char *description)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
% o description: the wand view description.
%
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  /*
    Replace the view's description string.  The previous description (always
    allocated by NewWandView()/NewWandViewExtent()) is released first so it
    is not leaked on repeated calls.
  */
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewIterator() iterates over the wand view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetWandViewIterator method is:
%
% MagickBooleanType SetWandViewIterator(WandView *destination,
% SetWandViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the wand view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
  SetWandViewMethod set,void *context)
{
  ExceptionInfo
    *exception;
  Image
    *destination_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif
  ssize_t
    y;
  /*
    Write-only iteration: for each scanline the user "set" callback fills
    this thread's pixel wands, which are then copied into the destination's
    authentic pixels and synced back to the image.  The pixels are NOT
    pre-loaded before the callback runs.
  */
  assert(destination != (WandView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetWandViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* Pixels are written directly, so the image must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (destination->extent.height-destination->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's pixel-wand row */
    MagickBooleanType
      sync;
    register IndexPacket
      *magick_restrict indexes;
    register ssize_t
      x;
    register PixelPacket
      *magick_restrict pixels;
    /* A failure in any row skips the remaining rows. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* Callback populates the wands for this scanline. */
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the wand contents into the image's pixels. */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    /* CMYK images keep the black channel in the index queue. */
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(destination_image,destination->description,
          progress,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w T h r e a d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewThreads() sets the number of threads in a thread team.
%
% The format of the SetWandViewThreads method is:
%
% void SetWandViewThreads(WandView *image_view,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o number_threads: the number of threads in a thread team.
%
*/
MagickExport void SetWandViewThreads(WandView *image_view,
  const size_t number_threads)
{
  /*
    Set the number of threads in the view's thread team.  Requests above
    the thread resource limit fall back to the OpenMP maximum.
  */
  assert(image_view != (WandView *) NULL);
  /*
    Consistency fix: every other routine in this file validates a WandView
    against WandSignature, not MagickCoreSignature.
  */
  assert(image_view->signature == WandSignature);
  image_view->number_threads=number_threads;
  if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource))
    image_view->number_threads=GetOpenMPMaximumThreads();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferWandViewIterator() iterates over two wand views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination wand view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const WandView *source,
% WandView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferWandViewIterator method is:
%
% MagickBooleanType TransferWandViewIterator(WandView *source,
% WandView *destination,TransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
  WandView *destination,TransferWandViewMethod transfer,void *context)
{
  ExceptionInfo
    *exception;
  Image
    *destination_image,
    *source_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif
  ssize_t
    y;

  /*
    Transfer pixels from the source view to the destination view one
    scanline at a time, invoking the user "transfer" callback for each row
    and syncing the destination wands back into the destination image.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  assert(destination != (WandView *) NULL);
  assert(destination->signature == WandSignature);
  if (transfer == (TransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* The destination is written to, so it must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();
    MagickBooleanType
      sync;
    register const IndexPacket
      *magick_restrict indexes;
    register const PixelPacket
      *magick_restrict pixels;
    register IndexPacket
      *magick_restrict destination_indexes;
    register ssize_t
      x;
    register PixelPacket
      *magick_restrict destination_pixels;
    /* A failure in any row skips the remaining rows. */
    if (status == MagickFalse)
      continue;
    /* Read-only view of the source scanline. */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* Writable view of the destination scanline. */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /*
      Bug fix: seed the destination wands from the DESTINATION's own pixels
      and indexes (previously read from the source's `pixels`/`indexes`,
      which fed the callback the wrong data and read past the source
      scanline whenever the destination extent is wider).  This matches
      DuplexTransferWandViewIterator().
    */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelBlack(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Sync any updates the callback made back into the destination. */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* Bug fix: report the exception of the view that failed to sync
           (the destination), not the source view. */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateWandViewIterator() iterates over the wand view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateWandViewIterator method is:
%
% MagickBooleanType UpdateWandViewIterator(WandView *source,
% UpdateWandViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
  UpdateWandViewMethod update,void *context)
{
  ExceptionInfo
    *exception;
  Image
    *source_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif
  ssize_t
    y;
  /*
    Read-modify-write iteration: each scanline is loaded into this thread's
    pixel wands, handed to the user "update" callback, and then written back
    to the image through the authentic cache view.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdateWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* Pixels are modified in place, so the image must be DirectClass. */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's pixel-wand row */
    register IndexPacket
      *magick_restrict indexes;
    register ssize_t
      x;
    register PixelPacket
      *magick_restrict pixels;
    /* A failure in any row skips the remaining rows. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* Load the scanline into the pixel wands for the callback. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    /* CMYK images keep the black channel in the index queue. */
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Copy any callback updates back into the image's pixels. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        SetPixelBlack(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
|
matrix.c |
#include "matrix.h"
/*
* matrix.c
*
* Copyright (c) 2014, Rafat Hussain
* License : BSD 3-Clause
* See COPYRIGHT for more details
*/
/* Value/index pair used by sort1d()/compare() to sort an array's indices
   without moving the values themselves. */
typedef struct {
	double* a;  /* pointer into the caller's array (the value compared on) */
	int b;      /* original index of *a, reported after sorting */
} vipair;
double macheps() {
	/* Machine epsilon for double: the smallest eps with 1.0 + eps > 1.0,
	   found by halving until addition to 1.0 no longer changes it. */
	double eps = 1.0;
	do {
		eps /= 2.0;
	} while (eps + 1.0 > 1.0);
	return eps * 2.0;
}
double pmax(double a, double b) {
	/* Larger of two doubles. */
	return (a > b) ? a : b;
}
double pmin(double a, double b) {
	/* Smaller of two doubles. */
	return (a < b) ? a : b;
}
int imax(int a, int b) {
	/* Larger of two ints. */
	return (a > b) ? a : b;
}
int imin(int a, int b) {
	/* Smaller of two ints. */
	return (a < b) ? a : b;
}
double signx(double x) {
	/* Sign of x as +/-1.0; zero is treated as positive. */
	return (x >= 0.) ? 1.0 : -1.0;
}
double l2norm(double *vec, int N) {
double l2, sum;
int i;
sum = 0.;
for (i = 0; i < N; ++i) {
sum += vec[i] * vec[i];
}
l2 = sqrt(sum);
return l2;
}
int compare (const void* ind1, const void* ind2)
{
if (*((vipair *)ind1)->a > *((vipair *)ind2)->a)
return -1;
else if (*((vipair *)ind1)->a < *((vipair *)ind2)->a)
return 1;
else
return 0;
}
void sort1d(double* v,int N, int* pos)
{
vipair* val = NULL;
int i;
if (N <= 0)
return;
val = malloc(sizeof(vipair) * N);
for (i = 0; i < N; ++i) {
val[i].a = &v[i];
val[i].b = i;
}
qsort(val, N, sizeof(vipair), compare);
for (i = 0; i < N; ++i)
pos[i] = val[i].b;
free(val);
}
double array_max_abs(double *array,int N) {
	/* Largest absolute value in array[0..N-1]; 0.0 when N <= 0. */
	double best = 0.0;
	int k;
	for (k = 0; k < N; ++k) {
		double mag = fabs(array[k]);
		if (mag > best) {
			best = mag;
		}
	}
	return best;
}
double array_max(double *array,int N) {
	/* Largest element of array[0..N-1]; requires N >= 1. */
	double best = array[0];
	int k;
	for (k = 1; k < N; ++k) {
		if (array[k] > best) {
			best = array[k];
		}
	}
	return best;
}
double array_min(double *array,int N) {
	/* Smallest element of array[0..N-1]; requires N >= 1. */
	double best = array[0];
	int k;
	for (k = 1; k < N; ++k) {
		if (array[k] < best) {
			best = array[k];
		}
	}
	return best;
}
void dtranspose(double *sig, int rows, int cols,double *col) {
	/*
	 * Transpose sig (rows x cols, row-major) into col (cols x rows,
	 * row-major), visiting the matrix diagonal by diagonal.  The diagonal
	 * index i runs from -rows+1 (bottom-left corner) to cols-1 (top-right
	 * corner); ud is the element count of the current diagonal, capped at
	 * max = min(rows, cols).
	 */
	int max,ud,i,k;
	/* max = min(rows, cols): longest possible diagonal. */
	if (rows >= cols) {
		max = cols;
	} else {
		max = rows;
	}
	ud = 0;
	for (i= -rows + 1; i < cols; i++) {
		if (i <= 0) {
			/* Diagonals starting in the first column grow by one element
			   per step, up to the cap. */
			ud++;
			if (ud >= max)
				ud = max;
			for (k = 0; k < ud; k++) {
				/* col[k][k-i] = sig[k-i][k] */
				col[k*rows+k-i] = sig[(k-i)*cols+k];
			}
		} else {
			/* Diagonals starting in the first row shrink once the opposite
			   edge of the matrix is reached. */
			if (i - cols + rows > 0) {
				ud--;
				if (ud >= max)
					ud = max;
			}
			for (k = 0; k < ud; k++) {
				/* col[k+i][k] = sig[k][k+i] */
				col[(k+i)*rows+k] = sig[k*cols+k+i];
			}
		}
	}
}
void stranspose(double *sig, int rows, int cols,double *col) {
	/* Straightforward transpose: col (cols x rows, row-major) receives
	   sig^T, where sig is rows x cols, row-major. */
	int i, j;
	for (i = 0; i < rows; ++i) {
		for (j = 0; j < cols; ++j) {
			col[j * rows + i] = sig[i * cols + j];
		}
	}
}
void rtranspose(double *m, int rows, int cols,double *n, int r, int c) {
	/*
	 * Recursive (cache-oblivious) transpose of a rows x cols tile of m
	 * into n.  m is a tile of a row-major matrix with full row stride c;
	 * n is the transposed output with row stride r (n[i + j*r] = m[j + i*c],
	 * where r and c are the full matrix dimensions, fixed across the
	 * recursion).  Tiles no larger than BLOCKSIZE in both dimensions are
	 * transposed directly; larger tiles are split along their longer
	 * dimension and each half is recursed on.
	 */
	register int i,j;
	int rm,cm;
	int rm1,cm1,rm2,cm2;
	int block;
	block = (int) BLOCKSIZE;
	if (rows <= block && cols <= block) {
		/* Base case: direct elementwise transpose of the tile. */
		for (i = 0; i < rows; ++i) {
			for (j = 0; j < cols; ++j) {
				n[i+j*r] = m[j+i*c];
				//cout << *(n+i+j*r) << " ";
			}
		}
		//cout << endl;
	} else if (cols >= rows) {
		/* Wide tile: split the column range in half and recurse. */
		rm = rows;
		cm1 = (int) ceil((double) cols/2.0);
		cm2 = cols - cm1;
		rtranspose(m,rm,cm1,n,r,c);
		rtranspose(m+cm1,rm,cm2,n+cm1*r,r,c);
	} else if (rows > cols) {
		/* Tall tile: split the row range in half and recurse. */
		rm1 = (int) ceil((double) rows/2.0);
		rm2 = rows - rm1;
		cm = cols;
		rtranspose(m,rm1,cm,n,r,c);
		rtranspose(m+rm1*c,rm2,cm,n+rm1,r,c);
	}
}
void ctranspose(double *sig, int rows, int cols,double *col) {
	/* Dispatch transpose: use the recursive blocked path when either
	   dimension reaches TBLOCK, otherwise the simple elementwise path. */
	int threshold = (int) TBLOCK;
	if (rows >= threshold || cols >= threshold) {
		rtranspose(sig, rows, cols, col, rows, cols);
	} else {
		stranspose(sig, rows, cols, col);
	}
}
void mtranspose(double *sig, int rows, int cols,double *col) {
	/* Transpose sig (rows x cols) into col (cols x rows): the blocked
	   path is chosen only when BOTH dimensions reach TBLOCK. */
	if (rows >= (int) TBLOCK && cols >= (int) TBLOCK) {
		ctranspose(sig, rows, cols, col);
	} else {
		stranspose(sig, rows, cols, col);
	}
}
void itranspose(double *A, int M, int N) {
	/*
	 * In-place transpose of the M x N row-major matrix A; on return the
	 * same buffer holds the N x M transpose.  The square case swaps pairs
	 * across the diagonal; the rectangular cases transpose the leading
	 * square block in place and shuttle the remaining p = |M-N| rows or
	 * columns through a temporary buffer.
	 */
	int i, j, p, iter, iter2;
	double *buf;
	double temp;
	if (M == N) {
		/* Square: swap symmetric pairs across the main diagonal. */
		for (i = 0; i < N; ++i) {
			for (j = i + 1; j < N; ++j) {
				temp = A[i + j*N];
				A[i + j*N] = A[j + i*N];
				A[j + i*N] = temp;
			}
		}
	} else if (M > N) {
		/* Tall matrix: stash the extra p = M-N rows, transpose the top
		   N x N block in place, widen the row stride from N to M, then
		   scatter the stashed rows back as trailing columns. */
		p = M - N;
		buf = (double*)malloc(sizeof(double)* p * N);  /* NOTE(review): malloc result is dereferenced unchecked */
		memcpy(buf, A + N * N, sizeof(*A)*p*N);
		for (i = 0; i < N; ++i) {
			for (j = i + 1; j < N; ++j) {
				temp = A[i + j*N];
				A[i + j*N] = A[j + i*N];
				A[j + i*N] = temp;
			}
		}
		/* Spread rows out to stride M, iterating backwards so a source
		   row is never overwritten before it has been moved. */
		for (i = N - 1; i >= 1; --i) {
			memmove(A + i*M, A + i*N, sizeof(*A)*M);
		}
		/* Fill columns N..M-1 of each row from the stashed block. */
		for (i = 0; i < N; ++i) {
			iter = N + i * M;
			for (j = 0; j < p; ++j) {
				A[iter + j] = buf[j*N + i];
			}
		}
		free(buf);
	}
	else if (M < N) {
		/* Wide matrix: stash the extra p = N-M columns, compact the rows
		   from stride N down to stride M, transpose the leading M x M
		   block in place, then append the stashed columns as rows. */
		p = N - M;
		buf = (double*)malloc(sizeof(double)* p * M);  /* NOTE(review): malloc result is dereferenced unchecked */
		for (i = 0; i < M; ++i) {
			iter = M + i*N;
			for (j = 0; j < p; ++j) {
				buf[j*M + i] = A[iter + j];
			}
		}
		for (i = 1; i < M; ++i) {
			memmove(A + i*M, A + i * N, sizeof(*A)*M);
		}
		for (i = 0; i < M; ++i) {
			for (j = i + 1; j < M; ++j) {
				temp = A[i + j*M];
				A[i + j*M] = A[j + i*M];
				A[j + i*M] = temp;
			}
		}
		memcpy(A + M*M, buf, sizeof(*A)*p*M);
		free(buf);
	}
}
void mdisplay(double *A, int row, int col) {
	/* Print a row x col row-major matrix to stdout, one matrix row per
	   output line, bracketed by R<index> markers. */
	int r, c;
	printf("\n MATRIX Order : %d X %d \n \n", row, col);
	for (r = 0; r < row; r++) {
		printf("R%d: ", r);
		for (c = 0; c < col; c++) {
			printf("%g ", A[r * col + c]);
		}
		printf(":R%d \n", r);
	}
}
void madd(double* A, double* B, double* C,int rows,int cols) {
	/* Elementwise sum C = A + B; all three matrices are rows x cols. */
	int total, k;
	total = rows * cols;
#pragma omp parallel for
	for (k = 0; k < total; ++k) {
		C[k] = A[k] + B[k];
	}
}
void msub(double* A, double* B, double* C,int rows,int cols) {
/* Element-wise difference C = A - B; all three matrices are
 * rows x cols and tightly packed, so they are treated as flat arrays. */
int total, k;
total = rows * cols;
#pragma omp parallel for
for (k = 0; k < total; ++k) {
C[k] = A[k] - B[k];
}
}
void scale(double *A, int rows, int cols, double alpha) {
/* In-place scalar multiply: A = alpha * A, where A is rows x cols and
 * tightly packed. */
int total, k;
total = rows * cols;
#pragma omp parallel for
for (k = 0; k < total; ++k) {
A[k] *= alpha;
}
}
void nmult(double* A, double* B, double* C,int ra,int ca, int cb) {
/* Naive triple-loop matrix product C = A * B.
 * A is ra x ca, B is ca x cb, C is ra x cb; all row-major and packed. */
int i, j, k;
double acc;
#pragma omp parallel for private(i,j,k,acc)
for (i = 0; i < ra; ++i) {
for (j = 0; j < cb; ++j) {
acc = 0.;
for (k = 0; k < ca; ++k) {
acc += A[i * ca + k] * B[k * cb + j];
}
C[i * cb + j] = acc;
}
}
}
void tmult(double* A, double* B, double* C,int ra,int ca, int cb) {
/* Cache-friendlier product C = A * B (A: ra x ca, B: ca x cb).
 * B is transposed into a scratch buffer first so that the inner dot
 * product walks both operands contiguously. */
int i, j, k;
double acc;
double *Bt = (double*) malloc(sizeof(double) * ca * cb);
mtranspose(B, ca, cb, Bt);
#pragma omp parallel for private(i,j,k,acc)
for (i = 0; i < ra; ++i) {
for (j = 0; j < cb; ++j) {
acc = 0.;
for (k = 0; k < ca; ++k) {
acc += A[i * ca + k] * Bt[j * ca + k];
}
C[i * cb + j] = acc;
}
}
free(Bt);
}
void recmult(double* A, double* B, double* C,int m,int n, int p,int sA,int sB, int sC) {
/* Cache-oblivious recursive product: C += A * B on row-major blocks,
 * A m x n, B n x p, C m x p, splitting the largest dimension in half
 * until the base case fits under CUTOFF. C must be zeroed by the
 * caller (see rmult) since the base case accumulates.
 * NOTE(review): the stride parameters are shifted one slot relative
 * to their names - the base case and the recursion both use sB as
 * A's row stride and sC as the row stride of B and C, while sA is
 * never used. rmult passes (strA=m, strB=n, strC=p), which makes
 * this consistent (A stride n, B/C stride p), but the naming is
 * misleading. */
int m2,n2,p2;
register int i,j,k;
int u,v,t;
if (m + n + p <= CUTOFF) {
//#pragma omp parallel for private(i,j,k,v,u,t)
for (i = 0; i < m; ++i) {
for (j = 0; j < p; ++j) {
v = i * sB;
u = i * sC;
t = j + u;
for (k = 0; k < n;++k) {
C[t] += A[k + v] * B[j + k * sC];
}
}
}
} else if (m >= n && m >= p) {
/* Split A and C by rows. */
m2 = (int) ceil((double) m / 2.0);
recmult(A,B,C,m2,n,p,sA,sB,sC);
recmult(A + m2*sB,B,C + m2*sC,m-m2,n,p,sA,sB,sC);
} else if (n >= m && n >= p) {
/* Split the shared dimension: A by columns, B by rows. */
n2 = (int) ceil((double) n / 2.0);
recmult(A,B,C,m,n2,p,sA,sB,sC);
recmult(A+n2,B+n2*sC,C,m,n-n2,p,sA,sB,sC);
} else if (p >= m && p >= n) {
/* Split B and C by columns. */
p2 = (int) ceil((double) p / 2.0);
recmult(A,B,C,m,n,p2,sA,sB,sC);
recmult(A,B+p2,C+p2,m,n,p-p2,sA,sB,sC);
}
}
void rmult(double* A, double* B, double* C,int m,int n, int p) {
/* Recursive (cache-oblivious) product C = A * B for packed row-major
 * operands: A m x n, B n x p, C m x p. C is cleared first because the
 * recursive kernel accumulates into it. */
int k;
const int total = m * p;
for (k = 0; k < total; ++k) {
C[k] = 0.;
}
recmult(A, B, C, m, n, p, m, n, p);
}
int findrec(int *a, int *b, int *c) {
/* Compute the Strassen recursion depth for dimensions (*a, *b, *c):
 * repeatedly halve (rounding up) until the sum drops to CUTOFF, then
 * round each dimension back up so it halves cleanly that many times.
 * The padded dimensions are written back through the pointers and the
 * recursion depth is returned. */
int depth = 0;
double da = (double) *a;
double db = (double) *b;
double dc = (double) *c;
double mul = 1.;
while (da + db + dc > (double) CUTOFF) {
depth++;
mul *= 2;
da = ceil(da/2.);
db = ceil(db/2.);
dc = ceil(dc/2.);
}
*a = (int) da * mul;
*b = (int) db * mul;
*c = (int) dc * mul;
return depth;
}
void add_zero_pad(double *X, int rows, int cols, int zrow, int zcol,double *Y) {
/* Copy X (rows x cols) into the top-left corner of Y, which is
 * (rows+zrow) x (cols+zcol), and zero-fill the added right/bottom
 * border. */
int i, j;
const int ctot = cols + zcol;
const int rtot = rows + zrow;
for (i = 0; i < rtot; ++i) {
for (j = 0; j < ctot; ++j) {
if (i < rows && j < cols) {
Y[i * ctot + j] = X[i * cols + j];
} else {
Y[i * ctot + j] = 0.;
}
}
}
}
void remove_zero_pad(double *Y, int rows, int cols, int zrow, int zcol,double *Z) {
/* Extract the (rows-zrow) x (cols-zcol) top-left corner of Y
 * (rows x cols) into the packed output Z; the padding border is
 * discarded. */
int i, j;
const int rout = rows - zrow;
const int cout = cols - zcol;
for (i = 0; i < rout; ++i) {
for (j = 0; j < cout; ++j) {
Z[i * cout + j] = Y[i * cols + j];
}
}
}
void madd_stride(double* A, double* B, double* C,int rows,int cols,int sA,int sB,int sC) {
/* Strided element-wise sum C = A + B over a rows x cols window;
 * sA, sB, sC are the row strides (leading dimensions) of the three
 * operands, so the window may live inside larger matrices. */
int i, j;
for (i = 0; i < rows; ++i) {
const double *ra = A + i * sA;
const double *rb = B + i * sB;
double *rc = C + i * sC;
for (j = 0; j < cols; ++j) {
rc[j] = ra[j] + rb[j];
}
}
}
void msub_stride(double* A, double* B, double* C,int rows,int cols,int sA,int sB,int sC) {
/* Strided element-wise difference C = A - B over a rows x cols
 * window; sA, sB, sC are the row strides (leading dimensions), so the
 * window may live inside larger matrices. */
int i, j;
for (i = 0; i < rows; ++i) {
const double *ra = A + i * sA;
const double *rb = B + i * sB;
double *rc = C + i * sC;
for (j = 0; j < cols; ++j) {
rc[j] = ra[j] - rb[j];
}
}
}
void rmadd_stride(double* A, double* B, double* C,int rows,int cols,int p,int sA,int sB,int sC) {
/* Recursive quadrant-wise strided add, C = A + B, halving each
 * dimension until rows+cols+p drops under CUTOFF.
 * NOTE(review): this function appears to be dead code - the only call
 * sites in this file are commented out (inside srecmult). Its base
 * case indexes A/B/C with sA/sB/sC respectively, but the recursive
 * offsets advance A by sB and B/C by sC (the shifted convention used
 * by recmult); those two conventions conflict unless the strides
 * coincide, so verify before reviving it. The extra parameter p only
 * feeds the recursion cutoff. */
int i,j,u,v,w;
if (rows + cols + p <= CUTOFF) {
for (i = 0; i < rows; ++i) {
u = i * sC;
v = i * sA;
w = i * sB;
for(j = 0; j < cols;j++) {
C[j + u] = A[j + v] + B[j + w];
}
}
} else {
/* Recurse on the four half-size quadrants. */
rows/=2;cols/=2;p/=2;
rmadd_stride(A,B,C,rows,cols,p,sA,sB,sC);
rmadd_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC);
rmadd_stride(A + rows *sB,B + rows *sC,C + rows *sC,rows,cols,p,sA,sB,sC);
rmadd_stride(A + rows *sB + cols,B + rows *sC + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC);
}
}
void rmsub_stride(double* A, double* B, double* C,int rows,int cols,int p,int sA,int sB,int sC) {
/* Recursive quadrant-wise strided subtract, C = A - B, halving each
 * dimension until rows+cols+p drops under CUTOFF.
 * NOTE(review): like rmadd_stride this appears to be dead code (only
 * commented-out call sites in srecmult), and the base case's stride
 * usage (sA/sB/sC) conflicts with the recursion's offsets (sB for A,
 * sC for B and C) unless the strides coincide - verify before use. */
int i,j,u,v,w;
if (rows + cols + p <= CUTOFF) {
for (i = 0; i < rows; ++i) {
u = i * sC;
v = i * sA;
w = i * sB;
for(j = 0; j < cols;j++) {
C[j + u] = A[j + v] - B[j + w];
}
}
} else {
/* Recurse on the four half-size quadrants. */
rows/=2;cols/=2;p/=2;
rmsub_stride(A,B,C,rows,cols,p,sA,sB,sC);
rmsub_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC);
rmsub_stride(A + rows *sB,B + rows *sC,C + rows *sC,rows,cols,p,sA,sB,sC);
rmsub_stride(A + rows *sB + cols,B + rows *sC + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC);
}
}
void srecmult(double* A, double* B, double* C,int m,int n, int p,int sA,int sB, int sC) {
/* Strassen recursive product C = A * B on strided row-major blocks:
 * A is m x n with row stride sA, B is n x p with row stride sB, and
 * C is m x p with row stride sC. Below CUTOFF a plain triple loop is
 * used. Otherwise the dimensions are halved (the driver smult pads
 * them so they halve cleanly), the operands are split into quadrants,
 * and the seven Strassen products m1..m7 are formed and combined into
 * the four quadrants of C. m2, m5, m6, m7 are computed directly into
 * C's quadrants to save scratch storage; only m1, m3, m4 need heap
 * buffers. */
register int i,j,k;
int u,v,t;
double sum;
double *A1,*B1;
double *a11,*a12,*a21,*a22;
double *b11,*b12,*b21,*b22;
double *c11,*c12,*c21,*c22;
double *m1,*m2,*m3,*m4,*m5,*m6,*m7;
int sm1,sm2,sm3,sm4,sm5,sm6,sm7;
int sA1,sB1;
if (m + n + p <= CUTOFF) {
/* Base case: naive strided product (overwrites C). */
for (i = 0; i < m; ++i) {
for (j = 0; j < p; ++j) {
v = i * sA;
u = i * sC;
t = j + u;
sum = 0.;
for (k = 0; k < n;++k) {
sum += A[k + v] * B[j + k * sB];
}
C[t] = sum;
}
}
} else {
m/=2;n/=2;p/=2;
// A size mXn, C size mXp
a11 = A;
a12 = A + n;
a21 = A + m * sA;
a22 = A + n + m * sA;
//B size nXp
b11 = B;
b12 = B + p;
b21 = B + n * sB;
b22 = B + p + n * sB;
//C size mXp
c11 = C;
c12 = C + p;
c21 = C + m * sC;
c22 = C + p + m * sC;
// m matrices have dimension m X p each. See http://en.wikipedia.org/wiki/Strassen_algorithm
m1 = (double*) malloc(sizeof(double) *m * p);
sm1 = p;
m3 = (double*) malloc(sizeof(double) *m * p);
sm3 = p;
m4 = (double*) malloc(sizeof(double) *m * p);
sm4 = p;
/* m2/m5/m6/m7 alias quadrants of C directly. */
m2 = c21;
sm2 = sC;
m5 = c12;
sm5 = sC;
m6 = c22;
sm6 = sC;
m7 = c11;
sm7 = sC;
//m1 = (a11 + a22) * (b11 + b22)
sA1 = n;
sB1 = p;
A1 = (double*) malloc(sizeof(double) * m * n);
B1 = (double*) malloc(sizeof(double) * n * p);
madd_stride(a11,a22,A1,m,n,sA,sA,sA1);
madd_stride(b11,b22,B1,n,p,sB,sB,sB1);
srecmult(A1,B1,m1,m,n,p,sA1,sB1,sm1);
free(A1);
free(B1);
//m2 = (a21 + a22) * b11
A1 = (double*) malloc(sizeof(double) * m * n);
madd_stride(a21,a22,A1,m,n,sA,sA,sA1);
srecmult(A1,b11,m2,m,n,p,sA1,sB,sm2);
free(A1);
//m3 = a11 * (b12 - b22)
B1 = (double*) malloc(sizeof(double) * n * p);
//rmsub_stride(B + p,B + p + n * sC,B1,n,p,m,sC,sC,sC/2);
msub_stride(b12,b22,B1,n,p,sB,sB,sB1);
srecmult(a11,B1,m3,m,n,p,sA,sB1,sm3);
free(B1);
//m4 = a22 * (b21 - b11)
B1 = (double*) malloc(sizeof(double) * n * p);
//rmsub_stride(B + p,B + p + n * sC,B1,n,p,m,sC,sC,sC/2);
msub_stride(b21,b11,B1,n,p,sB,sB,sB1);
srecmult(a22,B1,m4,m,n,p,sA,sB1,sm4);
free(B1);
//m5 = (a11 + a12) * b22
A1 = (double*) malloc(sizeof(double) * m * n);
madd_stride(a11,a12,A1,m,n,sA,sA,sA1);
srecmult(A1,b22,m5,m,n,p,sA1,sB,sm5);
free(A1);
//m6 = (a21 - a11) * (b11 + b12)
A1 = (double*) malloc(sizeof(double) * m * n);
B1 = (double*) malloc(sizeof(double) * n * p);
msub_stride(a21,a11,A1,m,n,sA,sA,sA1);
madd_stride(b11,b12,B1,n,p,sB,sB,sB1);
srecmult(A1,B1,m6,m,n,p,sA1,sB1,sm6);
free(A1);
free(B1);
//m7 = (a12 - a22) * (b21 + b22)
A1 = (double*) malloc(sizeof(double) * m * n);
B1 = (double*) malloc(sizeof(double) * n * p);
msub_stride(a12,a22,A1,m,n,sA,sA,sA1);
madd_stride(b21,b22,B1,n,p,sB,sB,sB1);
srecmult(A1,B1,m7,m,n,p,sA1,sB1,sm7);
free(A1);
free(B1);
// c11 = m1 + m4 - m5 + m7 (m7 already lives in c11)
A1 = (double*) malloc(sizeof(double) * m * p);
sA1 = p;
madd_stride(m1,m7,m7,m,p,sm1,sm7,sm7);
msub_stride(m4,m5,A1,m,p,sm4,sm5,sA1);
madd_stride(m7,A1,m7,m,p,sm7,sA1,sm7);
free(A1);
// c22 = m1 - m2 + m3 + m6 (m6 already lives in c22)
A1 = (double*) malloc(sizeof(double) * m * p);
sA1 = p;
madd_stride(m1,m6,m6,m,p,sm1,sm6,sm6);
msub_stride(m3,m2,A1,m,p,sm3,sm2,sA1);
madd_stride(m6,A1,m6,m,p,sm6,sA1,sm6);
free(A1);
//c12 = m3 + m5
madd_stride(m3,m5,m5,m,p,sm3,sm5,sm5);
//c21 = m2 + m4
madd_stride(m4,m2,m2,m,p,sm4,sm2,sm2);
free(m1);
free(m3);
free(m4);
}
}
void smult(double* A, double* B, double* C,int m,int n, int p) {
/* Strassen product C = A * B (A: m x n, B: n x p, C: m x p).
 * The operands are zero-padded up to dimensions (a, b, c) chosen by
 * findrec so that they halve cleanly down to the CUTOFF base case,
 * multiplied with the recursive Strassen kernel, and the padding is
 * then stripped off the result.
 * Fixed: the old code also allocated a (a/2)*(c/2) buffer P and kept
 * findrec's return value, neither of which was ever used. */
int a, b, c;
double *X, *Y, *Z;
a = m;
b = n;
c = p;
findrec(&a, &b, &c);
X = (double*) malloc(sizeof(double) * a * b);
Y = (double*) malloc(sizeof(double) * b * c);
Z = (double*) malloc(sizeof(double) * a * c);
add_zero_pad(A, m, n, a - m, b - n, X);
add_zero_pad(B, n, p, b - n, c - p, Y);
srecmult(X, Y, Z, a, b, c, b, c, c);
remove_zero_pad(Z, a, c, a - m, c - p, C);
free(X);
free(Y);
free(Z);
}
void mmult(double* A, double* B, double* C,int m,int n, int p) {
/* General matrix product C = A * B (A: m x n, B: n x p).
 * Small problems go through the naive kernel; larger ones take the
 * padded Strassen path. */
if (m + n + p > CUTOFF / 2) {
smult(A, B, C, m, n, p);
} else {
nmult(A, B, C, m, n, p);
}
}
static int pludecomp(double *A,int N,int *ipiv) {
/* In-place LU decomposition with partial (row) pivoting.
 * On return A holds the unit-lower factor L (strictly below the
 * diagonal, as the elimination multipliers) and U (on and above the
 * diagonal); ipiv records the permutation: row k of the factorization
 * came from original row ipiv[k]. Always returns 0.
 * Fixed: the pivot search updated the running maximum with the raw
 * (signed) entry instead of its absolute value, so once a negative
 * candidate was seen the comparison against later columns was done
 * against a negative "maximum" and a smaller-magnitude row could be
 * chosen as pivot. */
int k,j,l,c1,c2,mind,tempi;
double ld,mult,mval,temp;
for(k=0;k < N;++k)
ipiv[k] = k;
for(k = 0; k < N-1; ++k) {
/* Find the largest-magnitude entry in column k on/below the diagonal. */
mval = fabs(A[k*N + k]);
mind = k;
for (j=k+1; j < N;++j) {
if (mval < fabs(A[j*N + k])) {
mval = fabs(A[j*N + k]); /* fixed: keep the magnitude, not the signed value */
mind = j;
}
}
if ( mind != k) {
/* Swap rows k and mind, and record the swap in ipiv. */
c1 = k *N;
c2 = mind * N;
tempi = ipiv[mind];
ipiv[mind] = ipiv[k];
ipiv[k] = tempi;
for (j = 0; j < N;j++) {
temp = A[c1 + j];
*(A + c1 + j) = *(A + c2 + j);
*(A + c2 + j) = temp;
}
}
/* Eliminate below the pivot; multipliers are stored in place. */
c2 = k*N;
ld = A[c2 + k];
if (ld != 0.) {
for (j = k+1; j < N; ++j) {
c1 = j*N;
mult = A[c1+k] /= ld;
for(l = k+1; l < N; ++l) {
A[c1+l] -= mult * A[c2 + l];
}
}
}
}
return 0;
}
void ludecomp(double *A,int N,int *ipiv) {
/* Public driver for the in-place partial-pivoting LU factorization;
 * see pludecomp for the storage convention. */
pludecomp(A, N, ipiv);
}
void linsolve(double *A,int N,double *b,int *ipiv,double *x) {
/* Solve the original system A0 * x = b, where A holds the in-place LU
 * factorization of A0 produced by ludecomp and ipiv its row
 * permutation. b is read through ipiv, so it is the unpermuted
 * right-hand side.
 * NOTE(review): a zero diagonal entry prints a message and exit(1)s
 * (leaking y); consider returning an error code instead. */
int i,j,c1,l;
double *y;
double sum;
y = (double*) malloc(sizeof(double) *N);
/*
* Two step Solution L * U * x = b
* Let U*x = y
* Solve L * y = b for y (Forward Substitution
* Solve U * x = b for x (Back Substitution)
*/
for(i = 0; i < N;++i) {
y[i] = 0.;
x[i] = 0.;
if ( A[i*N + i] == 0.) {
printf("The Matrix system does not have a unique solution");
exit(1);
}
//printf("\n B %d",ipiv[i]);
}
// Forward Substitution
y[0] = b[ipiv[0]];
for(i = 1; i < N; ++i) {
sum = 0.;
c1 = i*N;
for(j = 0; j < i; ++j) {
sum += y[j] * A[c1 + j];
}
y[i] = b[ipiv[i]] - sum;
}
// Back Substitution
x[N - 1] = y[N - 1]/A[N * N - 1];
for (i = N - 2; i >= 0; i--) {
sum = 0.;
c1 = i*(N+1);
l=0;
/* c1 points at the diagonal entry A[i][i]; l walks the columns to
 * its right. */
for(j = i+1; j < N;j++) {
l++;
sum += A[c1 + l] * x[j];
}
x[i] = (y[i] - sum) / A[c1];
}
free(y);
}
void minverse(double *A,int N,int *ipiv,double *inv) {
/* Invert the LU-factored matrix A (see ludecomp) column by column:
 * solve A * x = e_i for each unit vector e_i and store x as column i
 * of inv (N x N, row-major). */
int i, j, k;
double *rhs, *sol;
rhs = (double*) malloc(sizeof(double) * N);
sol = (double*) malloc(sizeof(double) * N);
for (j = 0; j < N; ++j) {
rhs[j] = 0.;
sol[j] = 0.;
}
for (i = 0; i < N; ++i) {
rhs[i] = 1.;
linsolve(A, N, rhs, ipiv, sol);
k = i;
for (j = 0; j < N; ++j) {
inv[k] = sol[j];
k += N;
}
rhs[i] = 0.;
}
free(sol);
free(rhs);
}
void eye(double *mat,int N) {
/* Fill mat (N x N, row-major) with the identity matrix. */
int i, j;
for (i = 0; i < N; ++i) {
for (j = 0; j < N; ++j) {
mat[i * N + j] = (i == j) ? 1. : 0.;
}
}
}
static double house_1(double*x,int N,double *v) {
/* Householder vector computation (Golub & Van Loan style): given x of
 * length N, fill v (v[0] normalized to 1) and return beta so that
 * (I - beta*v*v^T) x has zeros below its first entry.
 * Returns beta = 0 when x is already of the required form and
 * beta = -2 for the sign-flip case (sigma == 0, x[0] < 0). */
double beta,mu,temp;
double *sigma;
int i;
sigma = (double*) malloc(sizeof(double) * 1);
/* sigma = x(2:N)^T x(2:N) via a 1x(N-1) * (N-1)x1 product. */
if (N > 1) {
mmult(x+1,x+1,sigma,1,N-1,1);
} else {
sigma[0] = 0.0;
}
v[0] =1.;
for (i = 1; i < N;++i) {
v[i] = x[i];
}
if (sigma[0] == 0. && x[0] >= 0.) {
beta = 0.;
} else if (sigma[0] == 0. && x[0] < 0.) {
beta = -2.;
}else {
mu = sqrt(sigma[0] + x[0] * x[0]); /* mu = ||x||_2 */
if (x[0] <= 0.) {
v[0] = x[0] - mu;
} else {
/* Numerically stable form that avoids cancellation when x[0] > 0. */
v[0] = - sigma[0] / (x[0] + mu);
}
temp = v[0];
beta = (2.0 * v[0] * v[0]) /(sigma[0] + v[0] * v[0]);
/* Rescale so v[0] == 1. */
for (i = 0; i < N;++i) {
v[i] /= temp;
}
}
free(sigma);
return beta;
}
double house_2(double*x,int N,double *v) {
/* Alternative Householder vector computation: v is the unit-norm
 * vector x + sign(x[0])*||x||*e1 (normalized), and beta is always 2,
 * so the reflector is I - 2*v*v^T.
 * NOTE(review): if x is the zero vector the normalization divides by
 * zero - callers are presumed to pass nonzero x. */
double sgn,beta,sc;
double *sigma,*e;
int i;
sigma = (double*) malloc(sizeof(double) * 1);
e = (double*) malloc(sizeof(double) * N);
beta = 2.0;
sgn = 1.0;
/* sigma = ||x||_2 via a 1xN * Nx1 product. */
mmult(x,x,sigma,1,N,1);
sigma[0] = sqrt(sigma[0]);
e[0] =1.;
for (i = 1; i < N;++i) {
e[i] = 0.;
}
if (x[0] > 0.) {
sgn = 1.0;
} else if (x[0] < 0.) {
sgn = -1.0;
} else if (x[0] == 0.) {
sgn = 0.;
}
/* v = x + sign(x[0]) * ||x|| * e1 */
sc = sigma[0] * sgn;
//scale(e,N,1,sc);
e[0] *= sc;
for(i = 0; i < N;++i) {
v[i] = e[i] + x[i];
}
/* Normalize v to unit length. */
mmult(v,v,sigma,1,N,1);
sigma[0] = sqrt(sigma[0]);
for(i = 0; i < N;++i) {
v[i] = v[i] / sigma[0];
}
free(sigma);
free(e);
return beta;
}
double house(double*x,int N,double *v) {
/* Compute the Householder vector v and scalar beta for x (length N);
 * delegates to the Golub & Van Loan normalization (house_1). */
return house_1(x, N, v);
}
void housemat(double *v, int N,double beta,double *mat) {
/* Form the explicit Householder reflector mat = I - beta * v * v^T
 * for the length-N vector v. */
double *outer;
outer = (double*) malloc(sizeof(double) * N * N);
eye(mat, N);
mmult(v, v, outer, N, 1, N);
scale(outer, N, N, beta);
msub(mat, outer, mat, N, N);
free(outer);
}
void qrdecomp(double *A, int M, int N,double *bvec) {
/* Householder QR factorization of A (M x N, M >= N), done in place:
 * on return the upper triangle of A holds R and the columns below the
 * diagonal hold the essential parts of the Householder vectors
 * (v[0] == 1 is implicit); bvec[j] receives the beta for column j.
 * Exits the process if M < N. */
int j,i,k,u,t;
double *x,*v,*AT,*w;
double beta;
if (M < N) {
printf("M should be greater than or equal to N");
exit(1);
}
x = (double*) malloc(sizeof(double) * M);
v = (double*) malloc(sizeof(double) * M);
AT = (double*) malloc(sizeof(double) * M * N);
w = (double*) malloc(sizeof(double) * M * M);
for(j = 0; j < N;++j) {
/* Householder vector for the trailing part of column j. */
for(i=j;i < M;++i) {
x[i-j] = A[i*N+j];
}
beta = house(x,M-j,v);
bvec[j] = beta;
/* Gather the trailing submatrix A(j:M, j:N) transposed into AT so
 * the update below can be done with mmult. */
for (i=j; i < M; i++) {
t = i * N;
u = 0;
for (k=j; k < N; k++) {
AT[u+i-j] = A[k+t];
u+=(M-j);
}
}
/* Apply the reflector: A(j:,j:) -= beta * v * (A(j:,j:)^T v)^T. */
mmult(AT,v,w,N-j,M-j,1);
scale(w,N-j,1,beta);
mmult(v,w,AT,M-j,1,N-j);
for (i=j; i < M; i++) {
t = i *N;
for (k=j; k < N; k++) {
A[t+k] -= AT[(i-j)*(N-j) + k - j];
}
}
/* Store the essential part of v below the diagonal of column j. */
if (j < M) {
for(i=j+1;i < M;++i) {
A[i*N+j] = v[i-j];
}
}
}
free(x);
free(v);
free(AT);
free(w);
}
void getQR(double *A,int M,int N,double *bvec,double *Q, double *R) {
/* Extract explicit Q (M x N) and R (N x N) from the compact
 * factorization produced by qrdecomp: R is copied from A's upper
 * triangle, and Q is built by applying the stored Householder
 * reflectors (betas in bvec, essential vectors below A's diagonal) in
 * reverse order to the identity.
 * NOTE(review): x is allocated but never used. */
int i,j,k,t,u;
double *x,*v,*AT,*w;
x = (double*) malloc(sizeof(double) * M);
v = (double*) malloc(sizeof(double) * M);
AT = (double*) malloc(sizeof(double) * M * N);
w = (double*) malloc(sizeof(double) * M * M);
/* R = upper triangle of A. */
for(i = 0; i < N;++i) {
t = i *N;
for(j = 0; j < N;++j) {
if (i > j) {
R[t+j] = 0.;
} else {
R[t+j] = A[t+j];
}
}
}
/* Q starts as the M x N identity block. */
for(i = 0; i < M;++i) {
t = i *N;
for(j = 0; j < N;++j) {
if (i == j) {
Q[t+j] = 1.;
} else {
Q[t+j] = 0.;
}
}
}
/* Back-accumulate the reflectors: Q = H_0 H_1 ... H_{N-1} * I. */
for(j = N-1; j >= 0;--j) {
/* Reconstruct v for column j (implicit leading 1). */
v[0] = 1.;
for(i=j+1;i < M;++i) {
v[i-j] = A[i*N+j];
}
/* Gather Q(j:M, j:N) transposed into AT and apply
 * Q(j:,j:) -= beta * v * (Q(j:,j:)^T v)^T, as in qrdecomp. */
for (i=j; i < M; i++) {
t = i * N;
u = 0;
for (k=j; k < N; k++) {
AT[u+i-j] = Q[k+t];
u+=(M-j);
}
}
mmult(AT,v,w,N-j,M-j,1);
scale(w,N-j,1,bvec[j]);
mmult(v,w,AT,M-j,1,N-j);
for (i=j; i < M; i++) {
t = i *N;
for (k=j; k < N; k++) {
Q[t+k] -= AT[(i-j)*(N-j) + k - j];
}
}
}
free(x);
free(v);
free(AT);
free(w);
}
void hessenberg(double *A,int N) {
/* Reduce the N x N matrix A to upper Hessenberg form in place by a
 * sequence of Householder similarity transformations
 * A <- H_k A H_k (so eigenvalues are preserved). For each column k a
 * reflector zeroing A(k+2:N, k) is built and applied from the left,
 * then from the right. */
int k,i,j,t,u;
double *x,*v,*AT,*w;
double beta;
x = (double*) malloc(sizeof(double) * N);
v = (double*) malloc(sizeof(double) * N);
AT = (double*) malloc(sizeof(double) * N * N);
w = (double*) malloc(sizeof(double) * N);
for (k = 0; k < N-2;++k) {
/* Householder vector for A(k+1:N, k). */
for(i=k + 1;i < N;++i) {
x[i-k-1] = A[i*N+k];
//printf("x %lf \n",x[i-k-1]);
}
beta = house(x,N-k-1,v);
/* Left update: A(k+1:, k:) -= beta * v * (A(k+1:, k:)^T v)^T.
 * AT gathers the affected submatrix transposed for mmult. */
for (i=k+1; i < N; i++) {
t = i * N;
u = 0;
for (j=k; j < N; j++) {
AT[u+i-k-1] = A[j+t];
u+=(N-k-1);
}
}
//mdisplay(AT,N-k,N-k-1);
mmult(AT,v,w,N-k,N-k-1,1);
scale(w,N-k,1,beta);
mmult(v,w,AT,N-k-1,1,N-k);
//mdisplay(AT,N-k-1,N-k);
for (i=k+1; i < N; i++) {
t = i * N;
for (j=k; j < N; j++) {
A[t+j] -= AT[(i-k-1)*(N-k) + j - k];
}
}
//mdisplay(A,N,N);
/* Right update: A(:, k+1:) -= beta * (A(:, k+1:) v) * v^T. */
for (i=0; i < N; i++) {
t = i * N;
u = i * (N-k-1);
for (j=k+1; j < N; j++) {
AT[u+j-k-1] = A[t+j];
}
}
//mdisplay(AT,N,N-k-1);
mmult(AT,v,w,N,N-k-1,1);
scale(w,N,1,beta);
mmult(w,v,AT,N,1,N-k-1);
//mdisplay(AT,N,N-k-1);
for (i=0; i < N; i++) {
t = i * N;
u = i * (N-k-1);
for (j=k+1; j < N; j++) {
A[t+j] -= AT[u+j-k-1];
}
}
}
free(x);
free(v);
free(AT);
free(w);
}
void francisQR(double *A,int N) {
int m,n,k,q,r,t,u,i,j;
double s,t2,beta;
double *x,*v,*AT,*w;
int NN;
/*
 * Implicit double-shift Francis QR step applied in place to the N x N
 * upper Hessenberg matrix A.
 * Reference - Algorithm 7.5.1 Golub,van Loan Matrix Computations 3rd Edition
 */
if (N <= 2) {
/* 1x1 and 2x2 blocks need no Francis step. Checking this FIRST
 * fixes two defects of the old code, which returned only after
 * allocating the scratch buffers (leaking x, v, AT, w on every
 * small deflated block) and after computing the shift and first
 * column, which indexed outside A for N <= 2 (e.g. A[NN-N-2] with
 * N = 1, A[2N+1] with N = 2). */
return;
}
x = (double*) malloc(sizeof(double) * 3);
v = (double*) malloc(sizeof(double) * 3);
AT = (double*) malloc(sizeof(double) * 3 * N);
w = (double*) malloc(sizeof(double) * N);
n = N-1;
m = n-1;
NN = N*N;
/* Double shift from the trailing 2x2 block: s = its trace,
 * t2 = its determinant. */
s = A[NN-1] + A[NN-N-2];
t2 = A[NN-1] * A[NN-N-2] - A[NN-2] * A[NN-N-1];
/* First column of (A - aI)(A - bI); only three entries are nonzero
 * because A is Hessenberg. */
x[0] = A[0]*A[0] + A[1]*A[N] - s*A[0] + t2;
x[1] = A[N]*(A[0] + A[N+1] - s);
x[2] = A[N] * A[N+N+1];
/* Chase the bulge down the subdiagonal with 3x3 reflectors. */
for (k = -1; k < N - 3;++k) {
beta = house(x,3,v);
if (k > 0) {
q = k;
} else {
q = 0;
}
/* Left update on rows k+1..k+3, columns q..N-1. */
for (i=k+1; i < k+4; i++) {
t = i * N;
u = 0;
for (j=q; j < N; j++) {
AT[u+i-k-1] = A[j+t];
u+=3;
}
}
mmult(AT,v,w,N-q,3,1);
scale(w,N-q,1,beta);
mmult(v,w,AT,3,1,N-q);
for (i=k+1; i < k+4; i++) {
t = i * N;
for (j=q; j < N; j++) {
A[t+j] -= AT[(i-k-1)*(N-q) + j - q];
}
}
/* Right update on rows 0..r-1, columns k+1..k+3. */
if (k+4 >= n) {
r = N;
} else {
r = k+4+1;
}
for (i=0; i < r; i++) {
t = i * N;
u = i * 3;
for (j=k+1; j < k+4; j++) {
AT[u+j-k-1] = A[t+j];
}
}
mmult(AT,v,w,r,3,1);
scale(w,r,1,beta);
mmult(w,v,AT,r,1,3);
for (i=0; i < r; i++) {
t = i * N;
u = i * 3;
for (j=k+1; j < k+4; j++) {
A[t+j] -= AT[u+j-k-1];
}
}
/* Next bulge column. */
x[0] = A[N*(k+2) + k+1];
x[1] = A[N*(k+3) + k+1];
if (k < n-3) {
x[2] = A[N*(k+4) + k+1];
}
}
/* Final 2x2 reflector to finish the chase. */
beta = house(x,2,v);
for (i=n-1; i < N; i++) {
t = i * N;
u = 0;
for (j=n-2; j < N; j++) {
AT[u+i-n+1] = A[j+t];
u+=2;
}
}
mmult(AT,v,w,3,2,1);
scale(w,3,1,beta);
mmult(v,w,AT,2,1,3);
for (i=n-1; i < N; i++) {
t = i * N;
for (j=n-2; j < N; j++) {
A[t+j] -= AT[(i-n+1)*3 + j - n + 2];
}
}
for (i=0; i < N; i++) {
t = i * N;
u = i * 2;
for (j=n-1; j < N; j++) {
AT[u+j-n+1] = A[t+j];
}
}
mmult(AT,v,w,N,2,1);
scale(w,N,1,beta);
mmult(w,v,AT,N,1,2);
for (i=0; i < N; i++) {
t = i * N;
u = i * 2;
for (j=n-1; j < N; j++) {
A[t+j] -= AT[u+j-n+1];
}
}
free(x);
free(v);
free(AT);
free(w);
}
void eig22(double *A, int stride,double *eigre,double *eigim) {
/* Eigenvalues of the 2x2 block at A[0],A[1],A[stride],A[stride+1]:
 * real parts in eigre[0..1], imaginary parts in eigim[0..1].
 * A rotation first equalizes the diagonal entries; the eigenvalues
 * are then at11 +/- sqrt(at12*at21) - complex when at12*at21 < 0,
 * real otherwise.
 * NOTE(review): in the real branch c2 = (1+t2) looks like it was
 * meant to be 1/(1+t2), but c2 and s2 are not used after that point,
 * so the returned eigenvalues are unaffected. */
int N;
double a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,t2,at11,at12,at21,at22;
N = stride;
a11 = A[0];
a12 = A[1];
a21 = A[N];
a22 = A[N+1];
/* Rotation angle that makes the transformed diagonal entries equal. */
if ( (a12 + a21) == 0) {
c = 1./sqrt(2.0);
s = c;
} else {
t1 = (a11 - a22) / (a12 + a21);
t = t1 /(1. + sqrt(1+t1*t1));
c = 1./sqrt(1 + t*t);
s = c*t;
}
c2 = c*c;
s2 = s*s;
cs = c*s;
/* Similarity-transformed block [at11 at12; at21 at22], at11 == at22. */
at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
/* Assume a complex pair first ... */
eigre[0] = eigre[1] = at11;
eigim[0] = sqrt(-at12 * at21);
eigim[1] = -sqrt(-at12 * at21);
/* ... then overwrite with the real pair when at12*at21 >= 0. */
if ( at12*at21 >= 0) {
if (at12 == 0) {
c = 0;
s = 1;
c2 = 0;
s2 = 1;
cs = 0;
} else {
t = sqrt(at21/at12);
t2 = t * t;
cs = t/(1+t2);
c2 = (1+t2);
s2 = t2 /(1+t2);
}
eigim[0] = eigim[1] = 0.0;
eigre[0] = at11 - cs * (at12 + at21);
eigre[1] = at11 + cs * (at12 + at21);
}
}
int francis_iter(double *A, int N, double *H) {
/* Drive the real Schur reduction: copy A into H, reduce H to upper
 * Hessenberg form, then repeatedly apply Francis double-shift QR
 * steps to the active unreduced diagonal block [q..p], deflating by
 * zeroing subdiagonal entries that fall below TOL times the sum of
 * the magnitudes of their diagonal neighbours.
 * Returns 1 on convergence, 0 if 30*N iterations were exhausted. */
int success,brkpoint;
int i,j,it,p,q,t,u;
double *temp;
success = 0;
brkpoint = 30 * N;
it = 0;
p = N - 1;
temp = (double*) malloc(sizeof(double) * N * N);
for(i = 0; i < N*N;++i) {
H[i] = A[i];
}
hessenberg(H,N);
while (p > 1 && it < brkpoint) {
/* Shrink p past already-deflated 1x1 / 2x2 blocks. */
while (p > 1 && (H[N*p + p-1] == 0 || H[N*(p-1) + p-2] == 0)) {
if (H[N*p + p-1] == 0) {
p--;
} else if (H[N*(p-1) + p-2] == 0) {
p=p-2;
}
}
if (p > 0) {
/* Find the top q of the unreduced block ending at p. */
q = p-1;
while (q > 0 && fabs(H[N*q + q-1]) != 0) {
q--;
}
/* Copy the block out, run one Francis step, copy it back. */
for (i=q; i <= p; i++) {
t = i * N;
u = (i-q) * (p-q+1);
for (j=q; j <= p; j++) {
temp[u+j-q] = H[t+j];
}
}
francisQR(temp,p-q+1);
for (i=q; i <= p; i++) {
t = i * N;
u = (i-q) * (p-q+1);
for (j=q; j <= p; j++) {
H[t+j] = temp[u+j-q];
}
}
/* Deflate negligible subdiagonal entries. */
for(i = q; i <= p-1;++i) {
if ( fabs(H[(i+1)*N+i]) <= TOL * (fabs(H[i*N+i]) + fabs(H[(i+1)*N+i+1]) ) ) {
H[(i+1)*N+i] = 0.;
}
}
it++;
//printf("iter %d \n",it);
}
}
if (it == brkpoint) {
success = 0;
} else {
success = 1;
}
free(temp);
return success;
}
static void eig2t(double *A, int stride) {
/* In-place similarity rotation of the 2x2 block at
 * A[0],A[1],A[stride],A[stride+1] chosen so that the transformed
 * block has equal diagonal entries (same construction as eig22, but
 * the block is written back instead of extracting eigenvalues). */
int N;
double a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,at11,at12,at21,at22;
N = stride;
a11 = A[0];
a12 = A[1];
a21 = A[N];
a22 = A[N+1];
/* Rotation angle that equalizes the diagonal of the rotated block. */
if ( (a12 + a21) == 0) {
c = 1./sqrt(2.0);
s = c;
} else {
t1 = (a11 - a22) / (a12 + a21);
t = t1 /(1. + sqrt(1+t1*t1));
c = 1./sqrt(1 + t*t);
s = c*t;
}
c2 = c*c;
s2 = s*s;
cs = c*s;
at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
A[0] = at11;
A[1] = at12;
A[N] = at21;
A[N+1] = at22;
}
void eig(double *A,int N,double *eigre,double *eigim) {
/* Eigenvalues of the real N x N matrix A: real parts into eigre,
 * imaginary parts into eigim (both length N). A is reduced to real
 * Schur form via francis_iter; each remaining 2x2 diagonal block is
 * rotated to equal-diagonal form (eig2t), then its eigenvalue pair is
 * read off - a complex-conjugate pair when the off-diagonal product
 * is negative, a real pair otherwise. A itself is not modified.
 * NOTE(review): francis_iter's success flag is ignored here. */
int i,t,u,n;
double *H;
double t1,t2,cs;
H = (double*) malloc(sizeof(double) * N * N);
n = N - 1;
francis_iter(A,N,H);
//mdisplay(H,N,N);
/* Standardize every unreduced 2x2 block on the diagonal. */
i = 0;
while (i < n) {
u = i * N;
t = (i+1)*N;
if (H[t+i] != 0.) {
eig2t(H+u+i,N);
i = i +2;
} else {
i++;
}
}
//mdisplay(H,N,N);
/* Extract eigenvalues block by block. */
i = 0;
while (i < n) {
u = i * N;
t = (i+1)*N;
if (H[t+i] != 0.) {
if (H[u+i+1] * H[t+i] < 0.) {
/* Complex-conjugate pair: lambda = h11 +/- i*sqrt(-h12*h21). */
eigre[i] = H[u+i];
eigre[i+1] = H[t+i+1];
eigim[i] = sqrt(-H[u+i+1] * H[t+i]);
eigim[i+1] = -sqrt(-H[u+i+1] * H[t+i]);
} else {
/* Real pair: split the equal diagonal by the rotation term. */
if (H[u+i+1] == 0.) {
cs = 0.;
} else {
t1 = sqrt(H[t+i]/H[u+i+1]);
t2 = t1 * t1;
cs = t1/(1+t2);
}
eigre[i] = H[u+i] - cs * (H[u+i+1] + H[t+i]);
eigre[i+1] = H[u+i] + cs * (H[u+i+1] + H[t+i]);
eigim[i] = 0.;
eigim[i+1] = 0.;
}
i= i + 2;
} else {
/* Isolated real eigenvalue on the diagonal. */
eigre[i] = H[u+i];
eigim[i] = 0.;
i++;
}
}
/* Trailing 1x1 block, if the sweep stopped one short of the end. */
if (i == n) {
eigre[i] = H[N*N - 1];
eigim[i] = 0.;
}
free(H);
}
static int rcholu(double *A,int N, int stride, double *U22) {
/* Recursive in-place Cholesky factorization A = U^T U of the N x N
 * leading block of a matrix with row stride `stride`; only the upper
 * triangle is referenced/written. U22 is caller-provided scratch of
 * at least (N-1)*(N-1) doubles. Returns 0 on success, -1 when A is
 * not positive definite.
 * NOTE(review): for N > 1 only A[0] < 0 is rejected; A[0] == 0 would
 * divide by zero below - presumes strictly positive pivots. */
int sc;
int j,i,u,w;
double u11;
if (N == 1) {
if (A[0] > 0) {
A[0] = sqrt(A[0]);
return 0;
} else {
return -1;
}
} else {
if (A[0] < 0) {
return -1;
}
/* u11 = sqrt(a11); first row of U = a1j / u11. */
u11 = sqrt(A[0]);
A[0] = u11;
for (j = 1; j < N;++j) {
A[j] /= u11;
}
/* Rank-1 downdate of the trailing (N-1)x(N-1) block:
 * A22 -= u12^T u12 (upper triangle only), then recurse. */
mmult(A+1,A+1,U22,N-1,1,N-1);
for (i = 0; i < N-1; ++i) {
u = stride + 1+ i * stride;
w = i * (N-1);
for(j = i; j < N-1;j++) {
A[j + u] -= U22[j + w];
}
}
sc = rcholu(A+stride+1,N-1,stride,U22);
if (sc == -1) {
return -1;
}
}
return sc;
}
static int rbcholu(double *A,int N, int stride, double *UB, double *UT) {
/* Blocked recursive Cholesky A = U^T U (upper triangle, in place) of
 * the N x N leading block with row stride `stride`. UB is scratch for
 * the BLOCKSIZE panel factorization, UT is N*N scratch. Blocks of at
 * most BLOCKSIZE fall through to rcholu; otherwise: factor the
 * leading bs x bs panel (U11), solve U11^T U12 = A12 column by column
 * by forward substitution, downdate the trailing block with
 * U12^T U12, and recurse on it.
 * Returns 0 on success, -1 when A is not positive definite. */
int bs,bb,i,j,Nb,t,k,u,v,w,sc;
double *b,*x,*U12,*U12T;
double sum;
bs = (int) BLOCKSIZE;
bb = bs*bs;
if (N <= BLOCKSIZE) {
sc = rcholu(A,N,stride,UB);
if (sc == -1) {
return -1;
}
} else {
Nb = N - bs;
x = (double*) malloc(sizeof(double) * bs);
b = (double*) malloc(sizeof(double) * bs);
U12T = (double*) malloc(sizeof(double) * Nb * bs);
U12 = (double*) malloc(sizeof(double) * Nb * bs);
rcholu(A,bs,stride,UB); // U11
/* UT <- transpose of the first bs rows of A (U11 | A12), packed
 * with row stride bs, so columns of A12 become contiguous. */
for (i =0; i < bs;++i) {
t = i *stride;
u = 0;
for(j = 0; j < N;++j) {
UT[u+i] = A[j+t];
u += bs;
}
}
/* For each of the Nb columns of A12, solve U11^T y = a by forward
 * substitution (U11^T is lower triangular) and store y as a column
 * of U12 back into A and as a row of U12T. */
for(k = 0; k < Nb;++k) {
u = k * bs;
for(i = 0; i < bs;++i) {
b[i] = UT[bb+u+i];
x[i] = 0.;
}
for (i = 0; i < bs;++i) {
t = i*bs;
sum = 0;
for (j = 0; j < i;++j) {
sum += UT[t+j] * x[j];
}
x[i] = (b[i] - sum) / UT[t+i];
}
v = bs + k;
for(i = 0; i < bs;++i) {
A[v] = x[i];
U12T[u+i] = x[i];
v += stride;
}
}
/* UT <- U12^T * U12 (Nb x Nb), the downdate for the trailing block. */
mtranspose(U12T,Nb,bs,U12);
mmult(U12T,U12,UT,Nb,bs,Nb);
free(U12T);
free(U12);
free(b);
free(x);
for (i = 0; i < Nb; ++i) {
u = bs * stride + bs + i * stride;
w = i * Nb;
for(j = i; j < Nb;j++) {
A[j + u] -= UT[j + w];
}
}
sc = rbcholu(A + bs * stride + bs,Nb,stride,UB,UT);
if (sc == -1) {
return -1;
}
}
return sc;
}
int cholu(double *A, int N) {
/* Cholesky factorization A = U^T U done in place on the N x N matrix
 * A; the strictly lower triangle is zeroed afterwards so A holds U.
 * Returns 0 on success, -1 if A is not positive definite. */
int i, j, status;
double *scratch;
scratch = (double*) malloc(sizeof(double) * N * N);
status = rcholu(A, N, N, scratch);
for (i = 0; i < N; ++i) {
for (j = 0; j < i; ++j) {
A[i * N + j] = 0.;
}
}
free(scratch);
return status;
}
int bcholu(double *A, int N) {
/* Blocked Cholesky factorization A = U^T U in place; the strictly
 * lower triangle is zeroed afterwards so A holds U.
 * Returns 0 on success, -1 if A is not positive definite. */
int i, j, status;
const int blk = (int) BLOCKSIZE;
double *panel, *work;
work = (double*) malloc(sizeof(double) * N * N);
panel = (double*) malloc(sizeof(double) * blk * blk);
status = rbcholu(A, N, N, panel, work);
for (i = 0; i < N; ++i) {
for (j = 0; j < i; ++j) {
A[i * N + j] = 0.;
}
}
free(panel);
free(work);
return status;
}
int chol(double *A, int N) {
/* Cholesky driver: plain recursive factorization for matrices up to
 * BLOCKSIZE, blocked variant above it.
 * Returns 0 on success, -1 if A is not positive definite. */
return (N <= (int) BLOCKSIZE) ? cholu(A, N) : bcholu(A, N);
}
static void rchold(double *A,int N, int stride, double *U22) {
/* Recursive square-root-free factorization (Cholesky variant): the
 * diagonal entry is kept as the pivot d1 and the rest of the first
 * row is divided by it, then the trailing block is downdated by
 * d1 * u12^T u12 and the recursion continues. Upper triangle only;
 * U22 is caller-provided scratch of at least (N-1)*(N-1) doubles.
 * NOTE(review): a zero pivot divides by zero - presumes nonsingular
 * (positive-definite) input; no error code is returned. */
int j,i,u,w;
double d1;
if (N == 1) {
return;
} else {
d1 = A[0];
for (j = 1; j < N;++j) {
A[j] /= d1;
}
/* Trailing update: A22 -= d1 * u12^T u12 (upper triangle only). */
mmult(A+1,A+1,U22,N-1,1,N-1);
scale(U22,N-1,N-1,d1);
for (i = 0; i < N-1; ++i) {
u = stride + 1+ i * stride;
w = i * (N-1);
for(j = i; j < N-1;j++) {
A[j + u] -= U22[j + w];
}
}
rchold(A+stride+1,N-1,stride,U22);
}
}
void chold(double *A, int N) {
/* Driver for the square-root-free factorization (rchold): factors A
 * in place and clears the strictly lower triangle afterwards. */
int i, j;
double *work;
work = (double*) malloc(sizeof(double) * N * N);
rchold(A, N, N, work);
for (i = 0; i < N; ++i) {
for (j = 0; j < i; ++j) {
A[i * N + j] = 0.;
}
}
free(work);
}
void svd_sort(double *U,int M,int N,double *V,double *q) {
/* Reorder the SVD so the singular values in q are sorted (order
 * determined by sort1d), permuting the columns of U (M x N) and
 * V (N x N) to match. */
/*
* Pavel Sakov's CSA SVD sort routine is used with some minor
* modifications. See The License below
*/
/*
* Copyright (C) 2000-2008 Pavel Sakov and CSIRO
Redistribution and use of material from the package `csa', with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of material must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. The names of the authors may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
*/
int i,j;
double *UT,*VT,*qq;
int *pos;
/* Working copies of U, V, q; pos receives the sort permutation. */
UT = (double*) malloc(sizeof(double) * N * M);
VT = (double*) malloc(sizeof(double) * N * N);
qq = (double*) malloc(sizeof(double) * N);
pos = (int*) malloc(sizeof(int) * N);
for(i = 0;i < N;++i) {
qq[i] = q[i];
}
for(i = 0;i < M*N;++i) {
UT[i] = U[i];
}
for(i = 0;i < N*N;++i) {
VT[i] = V[i];
}
//mtranspose(U,M,N,UT);
//mtranspose(V,N,N,VT);
sort1d(q,N,pos);
/* Apply the permutation column-wise. */
for(i = 0; i < N;++i) {
q[i] = qq[pos[i]];
for (j = 0; j < M;++j) {
U[j*N+i] = UT[j*N+pos[i]];
}
for (j = 0; j < N;++j) {
V[j*N+i] = VT[j*N+pos[i]];
}
}
free(UT);
free(VT);
free(qq);
free(pos);
}
int svd(double *A,int M,int N,double *U,double *V,double *q) {
/* Golub-Reinsch SVD: A (M x N, M >= N) is decomposed as
 * A = U * diag(q) * V^T with U (M x N, stored over a copy of A),
 * V (N x N) and the singular values in q (sorted by svd_sort at the
 * end). A itself is not modified.
 * Phases: (1) Householder bidiagonalization (diagonal in q,
 * superdiagonal in e), (2) accumulation of the right and left
 * transformations into V and U, (3) implicit-shift QR iteration on
 * the bidiagonal form until the superdiagonal vanishes.
 * NOTE(review): the error return at iteration overflow leaks e. */
int i,j,k,l,t,t2,ierr,cancel,iter,l1;
double eps,g,x,s,temp,f,h,c,y,z,scale;
double *e;
/*
THIS SUBROUTINE IS THE MODIFIED C TRANSLATION OF THE
EISPACK FORTRAN TRANSLATION OF THE ALGOL PROCEDURE SVD,
NUM. MATH. 14, 403-420(1970) BY GOLUB AND REINSCH.
HANDBOOK FOR AUTO. COMP., VOL II-LINEAR ALGEBRA, 134-151(1971).
*/
/*
* U = MXN
* V - NXN
* Q - NX1
*/
/*
* The program return error codes
*
* Code 0 if the computation is successful
* Code -1 If M < N . Transpose the matrix such that rows > columns and trye again
* Code 15 if maximum iterations are reached without achieving convergence. Increase SVDMAXITER value
* in matrix.h header file. Default Value is 50
*
*/
if (M < N) {
printf("Rows (M) should be greater than Columns (B) \n");
printf("Retry By Transposing the Input Matrix");
return -1;
}
e = (double*) malloc(sizeof(double) * N);
ierr = 0;
eps = macheps();
g = scale = x = 0.0;
for(i = 0; i < M*N;++i) {
U[i] = A[i];
}
/* Phase 1: Householder bidiagonalization. For each column i, a left
 * reflector zeros U(i+1:M, i) and a right reflector zeros
 * U(i, i+2:N); columns/rows are pre-scaled by their 1-norms for
 * numerical safety. x tracks the largest |q[i]| + |e[i]|. */
for(i = 0; i < N;++i) {
l = i+1;
e[i] = scale * g;
g = 0.0;
s = 0.0;
scale = 0.0;
if (i < M) {
for(k = i; k < M;++k) {
scale += fabs(U[k*N+i]);
}
if (scale != 0.0) {
for(k = i; k < M;++k) {
t = k * N;
U[t+i] /= scale;
temp = U[t+i];
s += temp*temp;
}
f = U[i*N+i];
g = (f < 0) ? sqrt(s) : -sqrt(s);
h = f * g - s;
U[i*N+i] = f - g;
if (i < N - 1) {
/* Apply the left reflector to the remaining columns. */
for(j = l; j < N;++j) {
s = 0.0;
for(k = i; k < M;++k) {
t = k * N;
s += U[t+i]*U[t+j];
}
f = s / h;
for(k = i; k < M;++k) {
t = k * N;
U[t+j] += f * U[t+i];
}
}
}
for(k = i; k < M;++k) {
t = k * N;
U[t+i] *= scale;
}
}
}
q[i] = scale * g;
g = 0.0;
s = 0.0;
scale = 0.0;
/* Right reflector on row i (zeroing entries beyond the
 * superdiagonal). */
if (i < M && i != N - 1) {
t = i *N;
for(k = l; k < M;++k) {
scale += fabs(U[t+k]);
}
if (scale != 0.0) {
for(k = l; k < N;++k) {
U[t+k] /= scale;
temp = U[t+k];
s = s + temp*temp;
}
f = U[t+l];
g = (f < 0) ? sqrt(s) : -sqrt(s);
h = f * g - s;
U[t+l] = f - g;
for(k = l;k < N;++k) {
e[k] = U[t+k] / h;
}
for (j = l; j < M; j++) {
s = 0.0;
t2 = j * N;
for (k = l; k < N; k++) {
s += U[t2+k] * U[t+k];
}
for (k = l; k < N; k++) {
U[t2+k] += s * e[k];
}
}
for (k = l; k < N; k++)
U[t+k] *= scale;
}
}
temp = fabs(q[i]) + fabs(e[i]);
if (x < temp) {
x = temp;
}
}
/*
ierr = 0;
eps = macheps();
tol = eps;
g = x = 0.0;
for(i = 0; i < M*N;++i) {
U[i] = A[i];
}
for(i = 0; i < N;++i) {
l = i+1;
e[i] = g;
s = 0.0;
for(k = i; k < M;++k) {
t = k * N;
temp = U[t+i];
s += temp*temp;
}
if (s < tol) {
g = 0.0;
} else {
f = U[i*N+i];
g = (f < 0) ? sqrt(s) : -sqrt(s);
h = f * g - s;
U[i*N+i] = f - g;
for(j = l; j < N;++j) {
s = 0.0;
for(k = i; k < M;++k) {
t = k * N;
s += (U[t+i]*U[t+j]);
}
f = s / h;
for(k = i; k < M;++k) {
t = k * N;
U[t+j] += (f * U[t+i]);
}
}
}
q[i] = g;
s = 0.0;
t = i * N;
for(k = l; k < N;++k) {
temp = U[t+k];
s = s + temp*temp;
}
if (s < tol) {
g = 0.0;
} else {
f = U[t+l];
g = (f < 0) ? sqrt(s) : -sqrt(s);
h = f * g - s;
U[t+l] = f - g;
for(k = l;k < N;++k) {
e[k] = U[t+k] / h;
}
for (j = l; j < M; j++) {
s = 0.0;
t2 = j * N;
for (k = l; k < N; k++) {
s += U[t2+k] * U[t+k];
}
for (k = l; k < N; k++) {
U[t2+k] += s * e[k];
}
}
}
temp = fabs(q[i]) + fabs(e[i]);
if (x < temp) {
x = temp;
}
}
*/
//Accumulating Right Hand Transformations
for(i = N - 1;i >= 0;--i) {
t = i * N;
if (i < N - 1) {
if (g != 0.0) {
h = U[t+i+1] * g;
for(j = l;j < N;++j) {
V[j*N+i] = U[t+j] / h;
}
for(j = l;j < N;++j) {
s = 0.0;
for(k = l; k < N;++k) {
s += U[t+k] * V[k*N+j];
}
for(k = l; k < N;++k) {
V[k*N+j] += (s * V[k*N+i]);
}
}
}
for(j = l; j < N;++j) {
V[t+j] = V[j*N+i] = 0.0;
}
}
V[t+i] = 1.0;
g = e[i];
l = i;
}
//Accumulating Left Hand Transformations
for(i = N - 1;i >= 0;--i) {
t = i * N;
l = i+1;
g = q[i];
if (i < N - 1) {
for(j = l;j < N;++j) {
U[t+j] = 0.0;
}
}
if (g != 0.0) {
if (i != N - 1) {
//h = U[t+i] * g;
for(j = l;j < N;++j) {
s = 0.0;
for(k = l; k < M;++k) {
s += (U[k*N+i] * U[k*N+j]);
}
f = (s / U[t+i]) / g;
for(k = i; k < M;++k) {
U[k*N+j] += (f * U[k*N+i]);
}
}
}
for(j = i; j < M;++j) {
U[j*N+i] = U[j*N+i] / g;
}
} else {
for(j = i; j < M;++j) {
U[j*N+i] = 0.0;
}
}
U[t+i] += 1.0;
}
// mdisplay(U,M,N);
/* Phase 3: implicit-shift QR iteration on the bidiagonal matrix.
 * eps*x is the absolute deflation threshold. */
eps = eps * x;
for(k = N - 1; k >= 0; --k) {
iter = 0;
while(1) {
iter++;
if (iter > SVDMAXITER) {
printf("Convergence Not Achieved \n");
return 15;
}
/* Scan for a negligible superdiagonal entry (convergence split)
 * or a negligible q[l-1] (cancellation needed).
 * NOTE(review): the q[l-1] test reads q[-1] when l == 0 -
 * presumed unreachable because e[0] is always below eps. */
cancel = 1;
for(l = k; l >= 0; --l) {
if (fabs(e[l]) <= eps) {
cancel = 0; //test f convergence
break;
}
if (fabs(q[l-1]) <= eps) {
//Cancel
break;
}
}
if (cancel) {
/* Cancellation of e[l..k] by Givens rotations applied to U. */
c = 0.0;
s = 1.0;
l1 = l - 1;
for(i = l; i <= k;++i) {
f = s*e[i];
e[i] *= c;
if (fabs(f) <= eps) {
break;
}
g = q[i];
h = q[i] = hypot(f,g);
c = g/h;
s = -f/h;
for(j = 0; j < M;++j) {
t = j * N;
y = U[t+l1];
z = U[t+i];
U[t+l1] = y * c + z * s;
U[t+i] = z * c - y * s;
}
}
}
z = q[k];
if (l != k) {
/* One implicit-shift QR sweep on rows l..k; the shift f comes
 * from the trailing 2x2 of the bidiagonal normal matrix. */
x = q[l];
y = q[k-1];
g = e[k-1];
h = e[k];
f = 0.5 * (((g + z) / h) * ((g - z) / y) + y / h - h / y);
g = hypot(f,1.0);
if (f < 0.0) {
temp = f - g;
} else {
temp = f+g;
}
f = x - (z / x) * z + (h / x) * (y / temp - h);
//Next QR Transformation
c = s = 1.0;
for(i = l+1; i <= k;++i) {
g = e[i];
y = q[i];
h = s * g;
g = c * g;
e[i-1] = z = hypot(f,h);
c = f / z;
s = h / z;
f = x * c + g * s;
g = g * c - x * s;
h = y * s;
y *= c;
for(j = 0; j < N;++j) {
t = j * N;
x = V[t+i-1];
z = V[t+i];
V[t+i-1] = x * c + z * s;
V[t+i] = z * c - x * s;
}
q[i-1] = z = hypot(f,h);
if (z != 0.0) {
c = f / z;
s = h / z;
}
f = c * g + s * y;
x = c * y - s * g;
for(j = 0; j < M;++j) {
t = j * N;
y = U[t+i-1];
z = U[t+i];
U[t+i-1] = y * c + z * s;
U[t+i] = z * c - y * s;
}
}
e[l] = 0.0;
e[k] = f;
q[k] = x;
} else {
//convergence
/* Make the converged singular value non-negative by flipping
 * the corresponding column of V. */
if (z < 0.0) {
q[k] = -z;
for (j = 0; j < N; j++) {
t = j *N;
V[t+k] = -V[t+k];
}
}
break;
}
}
}
svd_sort(U,M,N,V,q);
free(e);
return ierr;
}
int svd_transpose(double *A, int M, int N, double *U, double *V, double *q) {
/* SVD for the wide case (M < N): factor A^T instead, swapping the
 * roles of U and V. Exits the process if M >= N.
 * NOTE(review): svd is called with the same buffer (V) as both input
 * and output-U; this relies on svd starting by copying its input
 * element-wise into U, which is a no-op here - verify if svd's
 * initialization ever changes. */
int ret;
/* Call this routine if M < N
* U = MXM
* V - NXM
* Q - MX1
*/
if (M >= N) {
printf("M>=N. Use svd routine.\n");
exit(-1);
}
mtranspose(A, M, N, V);
ret = svd(V, N, M, V, U, q);
return ret;
}
static int rank_c(double *A, int M,int N) {
/* Numerical rank of A (M x N, expected M >= N - the public rank()
 * wrapper transposes wide inputs first): count the singular values
 * above tol = max(sv) * max(M,N) * macheps(). Relies on svd_sort
 * (called inside svd) placing the largest singular value in q[0].
 * Returns -1 if the SVD fails to converge. */
int i,rnk,ret;
double eps,tol,szmax,qmax;
double *U,*V,*q;
U = (double*) malloc(sizeof(double) * M*N);
V = (double*) malloc(sizeof(double) * N*N);
q = (double*) malloc(sizeof(double) * N);
eps = macheps();
rnk = 0;
if (M < N) {
//mtranspose(A,M,N,U);
szmax = (double) N;
} else {
szmax = (double) M;
}
ret = svd(A,M,N,U,V,q);
qmax = q[0];
if ( ret != 0) {
printf("Failed to Compute SVD");
free(U);
free(V);
free(q);
return -1;
}
tol = qmax*szmax *eps;
for(i = 0; i < N;++i) {
if (q[i] > tol) {
rnk++;
}
}
free(U);
free(V);
free(q);
return rnk;
}
/* rank: numerical rank of an M x N matrix A (A is left untouched when a
 * transpose is needed).  For wide matrices (M < N) the rank is computed
 * from A^T so that rank_c always sees rows >= columns.
 * Returns the rank, or -1 on allocation/SVD failure.
 */
int rank(double *A, int M,int N) {
	int rnk;
	if (M < N) {
		/* Allocate the transpose buffer only on the path that uses it
		 * (the old code always allocated it). */
		double *AT = (double*) malloc(sizeof(double) * M*N);
		if (AT == NULL) {
			printf("Memory Allocation Failed in rank\n");
			return -1;
		}
		mtranspose(A,M,N,AT);
		rnk = rank_c(AT,N,M);
		free(AT);
	} else {
		rnk = rank_c(A,M,N);
	}
	return rnk;
}
/* lls_svd_multi: minimum-norm least-squares solution of A x = b using the
 * SVD, for an overdetermined system (M rows >= N columns).
 *   x = V * diag(1/q) * U^T * b, with singular values below the rank
 * tolerance treated as zero.
 * Returns the numerical rank of A (the meaningful length of x), or -1 if
 * M < N or the SVD fails.
 */
int lls_svd_multi(double *A, double *b, int M,int N, double *x) {
	int rnk, ret, i;
	double *U, *V, *q, *UT, *d;
	double eps, tol, szmax, qmax;
	if (M < N) {
		printf("Rows (M) should be greater than Columns (B) \n");
		return -1;
	}
	U = (double*)malloc(sizeof(double)* M*N);
	V = (double*)malloc(sizeof(double)* N*N);
	q = (double*)malloc(sizeof(double)* N);
	UT = (double*)malloc(sizeof(double)* M*N);
	d = (double*)malloc(sizeof(double)* N);
	/*
	The code returns -1 if SVD computation fails else it returns the rank of the matrix A (and the real size of vector x)
	*/
	ret = svd(A, M, N, U, V, q);
	if (ret != 0) {
		printf("Failed to Compute SVD");
		free(U);
		free(V);
		free(q);
		free(UT);
		free(d);
		return -1;
	}
	/* Effective-rank threshold: tol = sigma_max * M * macheps(). */
	szmax = (double)M;
	eps = macheps();
	rnk = 0;
	qmax = q[0];
	tol = qmax*szmax *eps;
	for (i = 0; i < N; ++i) {
		if (q[i] > tol) {
			rnk++;
		}
	}
	/* d = U^T b.  (The old code re-malloc'd d here, leaking the buffer
	 * allocated above; the duplicate allocation has been removed.) */
	mtranspose(U, M, N, UT);
	mmult(UT, b, d, N, M, 1);
	/* Scale by inverse singular values; zero the rank-deficient tail. */
	for (i = 0; i < rnk; ++i) {
		d[i] /= q[i];
	}
	for (i = rnk; i < N; ++i) {
		d[i] = 0.0;
	}
	/* x = V d */
	mmult(V, d, x, N, N, 1);
	free(U);
	free(V);
	free(q);
	free(UT);
	free(d);
	return(rnk);
}
|
ContaminationEstimator.h | /*The MIT License (MIT)
Copyright (c) 2017 Fan Zhang, Hyun Min Kang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/* Contact: Fan Zhang <fanzhang@umich.edu> */
#ifndef CONTAMINATIONESTIMATOR_H_
#define CONTAMINATIONESTIMATOR_H_
#include <string>
#include <unordered_map>
//#include <tkDecls.h>
#include "MathVector.h"
#include "MathGenMin.h"
#include "SimplePileupViewer.h"
#include <limits>
#ifdef _OPENMP
#include "omp.h"
#endif
class ContaminationEstimator {
public:
bool isPCFixed;
bool isAlphaFixed;
bool isAFknown;
bool isHeter;
bool isPileupInput;
bool isSanityCheckDisabled;
bool verbose;
int numPC;
int numThread;
int seed;
double epsilon;
#define PCtype double
//#define PHRED(x) pow(10.0,x/-10.0)
// Convert a Phred-scaled quality score x into an error probability:
// P(error) = 10^(-x/10).  E.g. Phred(30) == 0.001.
static double Phred(double x) {
    const double exponent = x / -10.0;
    return pow(10.0, exponent);
}
class FullLLKFunc : public VectorFunc {
public:
double min_af;
double max_af;
double llk1;
double llk0;
ContaminationEstimator *ptr;
std::vector<double> fixPC;
std::vector<double> fixPC2;
double fixAlpha;
std::vector<double> globalPC;//best result holder
std::vector<double> globalPC2;//best result holder
double globalAlpha;//best result holder
const char *Base;
FullLLKFunc() {
FullLLKFunc::Base = "actg";
min_af = 0.00005;
max_af = 0.99995;
llk1 = 0;
ptr = nullptr;
fixAlpha = 0;
std::cerr << "Initialize from FullLLKFunc()" << std::endl;
}
// Dimension-aware constructor: zero-initializes dim-sized PC vectors for
// the intended sample (fixPC) and contaminating sample (fixPC2), mirrors
// them into the best-result holders, and records the owning estimator.
FullLLKFunc(int dim, ContaminationEstimator *contPtr) : fixPC(dim, 0.), fixPC2(dim, 0.), globalPC(fixPC),
globalPC2(fixPC2) {
FullLLKFunc::Base = "actg";
min_af = 0.00005;
max_af = 0.99995;
llk1 = 0.;
ptr = contPtr;
fixAlpha = 0.;
globalAlpha = 0.;
std::cerr << "Initialize from FullLLKFunc(int dim, ContaminationEstimator* contPtr)" << std::endl;
}
// NOTE(review): llk0 is not initialized by this constructor -- it is only
// assigned in CalculateLLK0(); confirm it is never read before that call.
~FullLLKFunc() {};
// Logistic (inverse-logit) function mapping any real x into (0, 1).
// Fix: the previous form exp(x)/(1+exp(x)) overflows for x >~ 709,
// yielding inf/inf = NaN; for non-negative x evaluate the equivalent
// 1/(1+exp(-x)) so both branches only ever compute exp of a
// non-positive argument and stay finite.
inline static double InvLogit(double &x) {
if (x >= 0.0) {
return 1.0 / (1.0 + exp(-x));
}
double e = exp(x);
return e / (1. + e);
};
// Log-odds of a probability x in (0, 1); inverse of InvLogit.
inline static double Logit(double &x) {
return log(x / (1. - x));
};
// Standardize tPC in place: component i becomes (tPC[i] - muv[i]) / sdv[i],
// using the owning estimator's per-component mean and standard deviation.
// Always returns 0.
inline int Normalize(std::vector<double> &tPC) {
for (int i = 0; i < tPC.size(); ++i) {
tPC[i] = (tPC[i] - ptr->muv[i]) / ptr->sdv[i];
}
return 0;
};
// Inverse of Normalize(): map standardized coordinates back to the
// original scale in place.  Always returns 0.
inline int InvNormalize(std::vector<double> &tPC) {
for (int i = 0; i < tPC.size(); ++i) {
tPC[i] = tPC[i] * ptr->sdv[i] + ptr->muv[i];
}
return 0;
};
// Return the most frequent non-reference base in a pileup column, as a
// lowercase character from Base ("actg"); returns 0 ('\0') when the
// column contains only reference bases ('.'/',') or is empty.
// Fix: the count array was read without ever being initialized
// (undefined behavior); it is now zero-initialized.
inline char findAlt(std::vector<char> &tmpBase) {
int a[4] = {0, 0, 0, 0}; // counts for a/c/t/g
int maxIndex(-1);
for (int i = 0; i < tmpBase.size(); ++i) {
if (tmpBase[i] == '.' || tmpBase[i] == ',') continue;
if (tmpBase[i] == 'A' || tmpBase[i] == 'a') a[0]++;
else if (tmpBase[i] == 'C' || tmpBase[i] == 'c') a[1]++;
else if (tmpBase[i] == 'T' || tmpBase[i] == 't') a[2]++;
else if (tmpBase[i] == 'G' || tmpBase[i] == 'g') a[3]++;
// Any non-reference observation (even an unrecognized one) marks the
// column as having a candidate alternate.
maxIndex = 0;
}
if (maxIndex == -1) return 0;
for (int j = 0; j < 4; ++j) {
if (a[j] > a[maxIndex]) maxIndex = j;
}
return Base[maxIndex];
}
// Likelihood of observing `base` given a genotype (0=hom-ref, 1=het,
// 2=hom-alt), the alternate allele `altBase`, and whether the read base
// is assumed to be a sequencing error (is_error).
// Pileup encoding: '.'/',' is the reference allele; anything else is a
// concrete base compared case-insensitively against altBase.
// Exits the process on a genotype outside {0,1,2}.
inline double getConditionalBaseLK(char base, int genotype, char altBase, bool is_error) {
    const bool isRef = (base == '.' || base == ',');
    const bool isAlt = (toupper(base) == toupper(altBase));
    if (genotype < 0 || genotype > 2) {
        std::cerr << "genotype error!" << std::endl;
        exit(EXIT_FAILURE);
    }
    if (!is_error) {
        // Error-free read: the base must match an allele of the genotype.
        switch (genotype) {
            case 0:
                return isRef ? 1 : 0;
            case 1:
                return (isRef || isAlt) ? 0.5 : 0;
            default: // genotype == 2
                return isAlt ? 1 : 0;
        }
    }
    // Erroneous read: the observed base is one of the three non-true bases,
    // chosen uniformly (1/3 each); het averages the two allele cases.
    switch (genotype) {
        case 0:
            if (isRef) return 0;
            return isAlt ? 1. / 3. : 2. / 3.;
        case 1:
            if (isRef || isAlt) return 1. / 6.;
            return 2. / 3.;
        default: // genotype == 2
            if (isRef) return 1. / 3.;
            return isAlt ? 0 : 2. / 3.;
    }
}
// Fill GF[0..2] with Hardy-Weinberg genotype frequencies for allele
// frequency AF, after clamping AF into [min_af, max_af]:
//   GF[0] = P(hom-ref), GF[1] = P(het), GF[2] = P(hom-alt).
void InitialGF(double AF, double *GF) const {
    if (AF < min_af) AF = min_af;
    else if (AF > max_af) AF = max_af;
    const double p = AF;        // alt allele frequency
    GF[2] = p * p;
    GF[1] = 2 * (p) * (1 - p);
    GF[0] = (1 - p) * (1 - p);
}
// Mixture log-likelihood of the pileup data given PC coordinates for the
// intended sample (tPC1), the contaminating sample (tPC2), and the
// contamination fraction alpha.  Per-marker allele frequencies are either
// taken from a known-AF table or reconstructed from the SVD model
// (AF = (UD * PC + mean) / 2), then each marker's likelihood is summed
// over all 9 genotype pairs of the two samples.
// NOTE(review): with OpenMP the floating-point reduction order varies by
// thread count, so results may differ in the last bits across runs.
inline double
ComputeMixLLKs(const std::vector<double> &tPC1, const std::vector<double> &tPC2, const double alpha) {
double sumLLK(0);
#ifdef _OPENMP
omp_set_num_threads(ptr->numThread);
#pragma omp parallel for reduction (+:sumLLK)
#endif
for (size_t i = 0; i < ptr->NumMarker; ++i) {
std::string chr = ptr->PosVec[i].first;
int pos = ptr->PosVec[i].second;
// Skip markers with no pileup coverage at this site.
if (ptr->viewer.posIndex.find(chr) == ptr->viewer.posIndex.end()) {
continue;
} else if (ptr->viewer.posIndex[chr].find(pos) == ptr->viewer.posIndex[chr].end()) {
continue;
}
std::vector<char> tmpBase = ptr->viewer.GetBaseInfoAt(chr, pos);
std::vector<char> tmpQual = ptr->viewer.GetQualInfoAt(chr, pos);
if (tmpBase.size() == 0) continue;
// Depth sanity filter: drop sites more than 3 SD from the mean depth.
if (not ptr->isSanityCheckDisabled and
(tmpBase.size() < (ptr->viewer.avgDepth - 3 * ptr->viewer.sdDepth) or
tmpBase.size() > (ptr->viewer.avgDepth + 3 * ptr->viewer.sdDepth)))
continue;
if (ptr->isAFknown) {
// Known allele frequencies: both samples share the same AF.
ptr->AFs[i] = ptr->AF2s[i] = ptr->knownAF[chr][pos];
} else {
// Project each sample's PCs through the SVD loadings to get its AF.
ptr->AFs[i] = 0.;
for (int k = 0; k < tPC1.size(); ++k) {
ptr->AFs[i] += ptr->UD[i][k] * tPC1[k];
}
ptr->AFs[i] += ptr->means[i];
ptr->AFs[i] /= 2.0;
ptr->AF2s[i] = 0.;
for (int k = 0; k < tPC2.size(); ++k) {
ptr->AF2s[i] += ptr->UD[i][k] * tPC2[k];
}
ptr->AF2s[i] += ptr->means[i];
ptr->AF2s[i] /= 2.0;
}
double markerLK(0);
double GF[3];
double GF2[3];
InitialGF(ptr->AFs[i], GF);
InitialGF(ptr->AF2s[i], GF2);
char altBase = ptr->ChooseBed[chr][pos].second;
// Marginalize over the 3x3 genotype pairs (intended x contaminant),
// weighting each read base by alpha-mixed error/no-error likelihoods.
for (int geno1 = 0; geno1 < 3; ++geno1)
for (int geno2 = 0; geno2 < 3; ++geno2) {
double baseLK(0);
for (int j = 0; j < tmpBase.size(); ++j) {
baseLK += log((alpha * getConditionalBaseLK(tmpBase[j], geno1, altBase, 1) +
(1. - alpha) * getConditionalBaseLK(tmpBase[j], geno2, altBase, 1)) *
Phred(tmpQual[j] - 33)
+ (alpha * getConditionalBaseLK(tmpBase[j], geno1, altBase, 0) +
(1. - alpha) * getConditionalBaseLK(tmpBase[j], geno2, altBase, 0)) *
(1 - Phred(tmpQual[j] - 33)));
// std::cerr <<i<<"th marker\t"<<tmpBase[j]<<"\t"<<tmpQual[j]<<"\t"<<altBase<<"\tlocalAlpha:"<<localAlpha<<"\tgeno1:"<<geno1<<"\tgeno2:"<<geno2
// <<"\tgetConditionalBaseLK1:"<<getConditionalBaseLK(tmpBase[j], geno1, altBase, 1)<<"\t"<< getConditionalBaseLK(tmpBase[j], geno2, altBase, 1)<<"\tPhred:"<<Phred(tmpQual[j] - 33)
// <<"\tgetConditionalBaseLK0:"<<getConditionalBaseLK(tmpBase[j], geno1, altBase, 0)<<"\t"<<getConditionalBaseLK(tmpBase[j], geno2, altBase, 0)<< std::endl;
}
markerLK += exp(baseLK) * GF[geno1] * GF2[geno2];
}
// Guard the log: markerLK can underflow to 0 for deep sites.
if (markerLK > 0)
sumLLK += log(markerLK);
}
return sumLLK;
}
// Seed the optimization state: copy the pre-defined PCs of the intended
// sample (PC[1]) into the fixed/best holders, evaluate the starting
// negative log-likelihood, then reset the estimator's PCs and alpha to
// small starting values for the minimizer.  Always returns 0.
int Initialize() {
globalPC = fixPC = globalPC2 = fixPC2 = ptr->PC[1];// only the intended sample has pre-defined PCs
globalAlpha = fixAlpha = ptr->alpha;
// llk1 holds the best (smallest) negative log-likelihood seen so far.
llk1 = (0 - ComputeMixLLKs(fixPC, fixPC2, fixAlpha));
for (int k = 0; k < ptr->numPC; ++k) {
//ptr->PC[0][k] = static_cast <double> (rand()) / static_cast <double> (RAND_MAX);
ptr->PC[0][k] = 0.01;
}
for (int k = 0; k < ptr->numPC; ++k) {
//ptr->PC[1][k] = static_cast <double> (rand()) / static_cast <double> (RAND_MAX);
ptr->PC[1][k] = 0.01;
}
//ptr->alpha = fabs(static_cast <double> (rand()) / static_cast <double> (RAND_MAX));
ptr->alpha = 0.03;
return 0;
}
// Negative log-likelihood of the null model (alpha = 0, i.e. no
// contamination, both samples at the best-fit PCs).  Stored in llk0 for
// the likelihood-ratio comparison against llk1.  Always returns 0.
int CalculateLLK0() {
llk0 = (0 - ComputeMixLLKs(globalPC, globalPC, 0));
return 0;
}
// Objective function called by the Amoeba (Nelder-Mead) minimizer.
// Decodes the simplex vector v into (PC1, PC2, alpha) according to which
// parameters are fixed and whether the contaminant comes from a different
// population (isHeter), returns the negative log-likelihood, and tracks
// the best parameters seen so far in globalPC/globalPC2/globalAlpha.
// Layout of v by mode:
//   homo + PC fixed:      v = [logit(alpha)]
//   homo + alpha fixed:   v = [PC(0..numPC-1)]
//   homo, both free:      v = [PC, logit(alpha)]
//   heter + PC fixed:     v = [PC2, logit(alpha)]   (intended sample fixed)
//   heter + alpha fixed:  v = [PC1, PC2]
//   heter, both free:     v = [PC1, PC2, logit(alpha)]
virtual double Evaluate(Vector &v) {
double smLLK = 0;
if (!ptr->isHeter) {
if (ptr->isPCFixed) {
// alpha is the only free parameter, optimized on the logit scale.
double tmpAlpha = InvLogit(v[0]);
smLLK = 0 - ComputeMixLLKs(fixPC, fixPC2, tmpAlpha);
if (smLLK < llk1) {
llk1 = smLLK;
globalAlpha = tmpAlpha;
}
} else if (ptr->isAlphaFixed) {
std::vector<double> tmpPC(ptr->numPC, 0.);
for (int i = 0; i < ptr->numPC; ++i) {
tmpPC[i] = v[i];
}
// Same population: both mixture components share one PC vector.
smLLK = 0 - ComputeMixLLKs(tmpPC, tmpPC, fixAlpha);
if (smLLK < llk1) {
llk1 = smLLK;
globalPC = tmpPC;
globalPC2 = tmpPC;
}
} else {
std::vector<double> tmpPC(ptr->numPC, 0.);
for (int i = 0; i < ptr->numPC; ++i) {
tmpPC[i] = v[i];
}
double tmpAlpha = InvLogit(v[ptr->numPC]);
smLLK = 0 - ComputeMixLLKs(tmpPC, tmpPC, tmpAlpha);
if (smLLK < llk1) {
llk1 = smLLK;
globalPC = tmpPC;
globalPC2 = tmpPC;
globalAlpha = tmpAlpha;
}
}
} else//contamination source from different population
{
if (ptr->isPCFixed) {//only fixed for intended sample
std::vector<double> tmpPC(ptr->numPC, 0.);
for (int i = 0; i < ptr->numPC; ++i) {
tmpPC[i] = v[i];
}
double tmpAlpha = InvLogit(v[ptr->numPC]);
smLLK = 0 - ComputeMixLLKs(tmpPC, fixPC2, tmpAlpha);
if (smLLK < llk1) {
llk1 = smLLK;
globalPC = tmpPC;
globalAlpha = tmpAlpha;
}
} else if (ptr->isAlphaFixed) {
std::vector<double> tmpPC(ptr->numPC, 0.);
std::vector<double> tmpPC2(ptr->numPC, 0.);
// v packs PC1 then PC2; anything longer is a caller error.
for (int k = 0; k < v.Length(); ++k) {
if (k < ptr->numPC)
tmpPC[k] = v[k];
else if (k < ptr->numPC * 2)
tmpPC2[k - (ptr->numPC)] = v[k];
else {
error("Simplex Vector dimension error!");
exit(EXIT_FAILURE);
}
}
smLLK = 0 - ComputeMixLLKs(tmpPC, tmpPC2, fixAlpha);
if (smLLK < llk1) {
llk1 = smLLK;
globalPC = tmpPC;
globalPC2 = tmpPC2;
}
} else {
std::vector<double> tmpPC(ptr->numPC, 0.);
std::vector<double> tmpPC2(ptr->numPC, 0.);
double tmpAlpha(0.);
// v packs PC1, PC2, then logit(alpha) as the final element.
for (int k = 0; k < v.Length(); ++k) {
if (k < ptr->numPC)
tmpPC[k] = v[k];
else if (k < ptr->numPC * 2)
tmpPC2[k - (ptr->numPC)] = v[k];
else if (k == ptr->numPC * 2)
tmpAlpha = InvLogit(v[k]);
else {
error("Simplex Vector dimension error!");
exit(EXIT_FAILURE);
}
}
smLLK = (0 - ComputeMixLLKs(tmpPC, tmpPC2, tmpAlpha));
if (smLLK < llk1) {
llk1 = smLLK;
globalPC = tmpPC;
globalPC2 = tmpPC2;
globalAlpha = tmpAlpha;
}
}
}
// NOTE(review): this unconditionally indexes globalPC[1]/globalPC2[1],
// which is out of bounds when numPC < 2 -- confirm numPC >= 2 is an
// invariant, or guard this trace.
if (ptr->verbose)
std::cerr << "globalPC:" << globalPC[0] << "\tglobalPC:" << globalPC[1]
<< "\tglobalPC2:" << globalPC2[0] << "\tglobalPC2:" << globalPC2[1]
<< "\tglobalAlpha:" << globalAlpha << "\tllk:" << llk1 << std::endl;
return smLLK;
}
};
SimplePileupViewer viewer;
uint32_t NumMarker;
FullLLKFunc fn;
std::unordered_map<std::string, std::unordered_map<uint32_t, double> > knownAF;
double alpha;//input alpha
std::vector<std::vector<PCtype> > UD;//input UD
std::vector<std::vector<PCtype> > PC;//input PC
std::vector<PCtype> means;
////
std::vector<PCtype> muv;
std::vector<PCtype> sdv;
////
std::vector<double> AFs;
std::vector<double> AF2s;
typedef std::unordered_map<std::string, std::unordered_map<int, std::pair<char, char> > > BED;
BED ChooseBed;//pos is 1-based
std::vector<region_t> BedVec;//serialized BED info, convenient for bam reading
std::vector<std::pair<std::string, int> > PosVec;
ContaminationEstimator();
ContaminationEstimator(int nPC, const char *bedFile, int nThread, double ep);
/*Initialize from existed UD*/
/*This assumes the markers are the same as the selected vcf*/
/*ContaminationEstimator(const std::string &UDpath, const std::string &PCpath, const std::string &Mean,
const std::string &pileup, const std::string &GLpath, const std::string &Bed);
*/
int ReadMatrixUD(const std::string &path);
int ReadMatrixPC(const std::string &path);
/*Intersect marker sites*/
/*
int ReadMatrixGL(const std::string &path);
*/
int ReadChooseBed(const std::string &path);
int ReadMean(const std::string &path);
int ReadAF(const std::string &path);
int ReadBam(const char *bamFile, const char *faiFile, const char *bedFile);
int ReadPileup(const std::string &pileupFile);
bool IsSanityCheckOK();
/*
int CheckMarkerSetConsistency();
int FormatMarkerIntersection();
*/
/*Optimize*/
int OptimizeLLK(const std::string &OutputPrefix);
~ContaminationEstimator();
/*
int RunFromVCF(const std::string VcfSiteAFFile, const std::string CurrentMPU, const std::string ReadGroup,
const std::string Prefix);
int RunFromSVDMatrix(const std::string UDpath, const std::string PCpath, const std::string Mean,
const std::string &MPUpath, const std::string &Bed, const std::string &Prefix,
const std::string &ReadGroup);
*/
int ReadSVDMatrix(const std::string &UDpath, const std::string &PCpath, const std::string &Mean);
/*
int FromBamtoPileup();
*/
bool OptimizeHomoFixedPC(AmoebaMinimizer &myMinimizer);
bool OptimizeHomoFixedAlpha(AmoebaMinimizer &myMinimizer);
bool OptimizeHomo(AmoebaMinimizer &myMinimizer);
bool OptimizeHeterFixedPC(AmoebaMinimizer &myMinimizer);
bool OptimizeHeterFixedAlpha(AmoebaMinimizer &myMinimizer);
bool OptimizeHeter(AmoebaMinimizer &myMinimizer);
};
#endif /* CONTAMINATIONESTIMATOR_H_ */
|
Jacobi2D-NaiveParallelSpaceTiled-OMP.test.c | /******************************************************************************
* Jacobi2D benchmark
* Basic parallelisation with OpenMP
*
* Usage:
* make omp
* export OMP_NUM_THREADS=8
* bin/Jacobi2D-NaiveParallel-OMP \
* `cat src/Jacobi2D-NaiveParallel-OMP.perfexecopts`
* For a run on 8 threads
******************************************************************************/
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#include <getopt.h>
#include <stdbool.h>
#include <ctype.h>
#include <math.h>
#include <assert.h>
#define STENCIL(read,write,x,y) space[write][x][y] = \
( space[read][x-1][y] +\
space[read][x][y] +\
space[read][x+1][y] +\
space[read][x][y+1] +\
space[read][x][y-1] )/5;
#include "util.h"
// main
// Stages
// 1 - command line parsing
// 2 - data allocation and initialization
// 3 - jacobi 1D timed within an openmp loop
// 4 - output and optional verification
// Jacobi2D benchmark driver: parse args, allocate two (N+2)^2 grids,
// first-touch them in tiled parallel order, seed random data, run T
// space-tiled Jacobi sweeps under OpenMP, then report time and optionally
// verify.
int main( int argc, char* argv[] ){
// rather than calling fflush
setbuf(stdout, NULL);
// 1 - command line parsing
Params cmdLineArgs;
parseCmdLineArgs(&cmdLineArgs,argc,argv);
// 1a - figure out how many complete tiles
// and what this size of any incomplete tiles
// are gonna be
int tileCountX = cmdLineArgs.problemSize/cmdLineArgs.tile_len_x;
int tileCountY = cmdLineArgs.problemSize/cmdLineArgs.tile_len_y;
// Round up for a partial trailing tile in each dimension.
if(cmdLineArgs.problemSize % cmdLineArgs.tile_len_x != 0){
tileCountX += 1;
}
if(cmdLineArgs.problemSize % cmdLineArgs.tile_len_y != 0){
tileCountY += 1;
}
// 2 - data allocation and initialization
// Interior cells live in [lowerBound, upperBound]; rows/cols 0 and N+1
// are the halo.
int lowerBound = 1;
int upperBound = lowerBound + cmdLineArgs.problemSize - 1;
double** space[2];
int i;
// allocate x axis
space[0] = (double**)malloc((cmdLineArgs.problemSize + 2) * sizeof(double*));
space[1] = (double**)malloc((cmdLineArgs.problemSize + 2) * sizeof(double*));
if( space[0] == NULL || space[1] == NULL ){
printf( "Could not allocate x axis of space array\n" );
exit(0);
}
// allocate y axis
for( i = 0; i < cmdLineArgs.problemSize + 2; ++i ){
space[0][i]=(double*)malloc((cmdLineArgs.problemSize + 2) * sizeof(double));
space[1][i]=(double*)malloc((cmdLineArgs.problemSize + 2) * sizeof(double));
if( space[0][i] == NULL || space[1][i] == NULL ){
printf( "Could not allocate y axis of space array\n" );
exit(0);
}
}
// use global seed to seed the random number gen (will be constant)
srand(cmdLineArgs.globalSeed);
// first touch for openmp
// Touch pages in the same tile-to-thread pattern as the compute loop so
// NUMA placement matches.
int x, y;
int itx, ity;
#pragma omp parallel for private( x, y ) schedule(dynamic)
for(itx=0; itx< tileCountX; itx++){
for(ity=0; ity < tileCountY; ity++ ){
int xlb = lowerBound+itx*cmdLineArgs.tile_len_x;
int xub = min(upperBound,lowerBound+itx*cmdLineArgs.tile_len_x+
cmdLineArgs.tile_len_x);
for( x = xlb; x <= xub; ++x ){
int ylb = lowerBound+ity*cmdLineArgs.tile_len_y;
int yub = min(lowerBound+ity*cmdLineArgs.tile_len_y+
cmdLineArgs.tile_len_y,upperBound);
for( y = ylb; y <= yub;++y ){
space[0][x][y] = 0;
space[1][x][y] = 0;
}
}
}
}
// seed the space.
// NOTE(review): rand() can return 0, making this a division by zero;
// also the evaluation order of the two rand() calls is unspecified in C,
// so results are compiler-dependent -- confirm this matches what
// verifyResultJacobi2D expects.
for( x = lowerBound; x <= upperBound; ++x ){
for( y = lowerBound; y <= upperBound; ++y ){
space[0][x][y] = rand() / (double)rand();
}
}
// set halo values (sanity)
for( i = 0; i < cmdLineArgs.problemSize + 2; ++i){
space[0][i][0] = 0;
space[1][i][0] = 0;
space[0][i][cmdLineArgs.problemSize + 1] = 0;
space[1][i][cmdLineArgs.problemSize + 1] = 0;
space[0][0][i] = 0;
space[1][0][i] = 0;
space[0][cmdLineArgs.problemSize + 1][i] = 0;
space[1][cmdLineArgs.problemSize + 1][i] = 0;
}
// 3 - jacobi 2D timed within an openmp loop
double start_time = omp_get_wtime();
int t,read=0,write=1;
// Hoisted loop invariants for the tile-bound computations below.
int lbplusx = lowerBound+cmdLineArgs.tile_len_x;
int xtmp = cmdLineArgs.tile_len_x;
int lbplusy = lowerBound+cmdLineArgs.tile_len_y;
int ytmp = cmdLineArgs.tile_len_y;
for( t = 1; t <= cmdLineArgs.T; ++t ){
// NOTE(review): xub/yub are INCLUSIVE and equal the next tile's first
// index, so the boundary row/column of each tile is computed by two
// tiles.  The duplicate writes store identical values (same read
// array), but this is redundant work and formally a data race under
// OpenMP; the intended bound was likely xlb + tile_len_x - 1 --
// confirm.
#pragma omp parallel for private( x, y ) schedule(dynamic)
for(itx=0; itx< tileCountX; itx++){
for(ity=0; ity < tileCountY; ity++ ){
int xlb = lowerBound+itx*xtmp;
int xub = min(upperBound,lbplusx+itx*xtmp);
for( x = xlb; x <= xub; ++x ){
int ylb = lowerBound+ity*ytmp;
int yub = min(lbplusy+ity*ytmp, upperBound);
for( y = ylb; y <= yub;++y ){
STENCIL( read, write, x, y);
}
}
}
}
// Swap roles: the freshly written grid becomes next step's input.
read = write;
write = 1 - write;
}
double end_time = omp_get_wtime();
double time = (end_time - start_time);
// 4 - output and optional verification
if( cmdLineArgs.printtime ){
/*
printf( "Threads: %d, P: %d, Tile: %d,%d, ",cmdLineArgs.cores,
cmdLineArgs.problemSize,
cmdLineArgs.tile_len_x,
cmdLineArgs.tile_len_y);
*/
printf( "Time: %f", time );
}
if( cmdLineArgs.verify ){
// After T steps the latest data sits in space[T & 1].
if(!verifyResultJacobi2D(space[cmdLineArgs.T & 1],cmdLineArgs.problemSize,
cmdLineArgs.globalSeed,cmdLineArgs.T )){
fprintf(stderr,"FAILURE\n");
}else{
fprintf(stderr,"SUCCESS\n");
}
}
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values, normalizing so
 * that result->tv_usec is non-negative when x >= y.
 * Side effect: *y is modified while carrying/borrowing microseconds.
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds when x has fewer usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds (> 1 second) into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative iff x's (adjusted) seconds fall below y's. */
    return x->tv_sec < y->tv_sec;
}
// 3D 7-point stencil benchmark: allocate two Nz x Ny x Nx grids, run Nt-1
// Jacobi-style sweeps TESTS times, and report the best wall-clock time.
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
// NOTE(review): Nx/Ny/Nz are only set when argc > 3 and Nt only when
// argc > 4 -- with fewer arguments they are used uninitialized below;
// confirm the harness always passes all four sizes.
if (argc > 3) {
// +2 adds a one-cell boundary layer on each side.
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// A[0]/A[1] are the ping-pong buffers for time steps t and t+1.
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 16;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
// NOTE(review): only indices >= 1 are initialized and A[1] is never
// seeded, yet the stencil reads index 0 planes and (from t=1 on) A[1]
// boundaries -- those reads see uninitialized memory; confirm this is
// the accepted behavior of this benchmark.
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
// Repeat the timed kernel and keep the minimum across TESTS runs.
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
// 7-point update: center plus the six face neighbors.
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
// NOTE(review): lowercase min() is not defined in this file (only the
// MIN macro) -- presumably provided by print_utils.h; confirm.
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
core_dlacpy_band.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlacpy_band.c, normal z -> d, Fri Sep 28 17:38:19 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
/*******************************************************************************
*
* @ingroup core_double
*
* plasma_core_dlacpy copies a sub-block A of a band matrix stored in LAPACK's band format
* to a corresponding sub-block B of a band matrix in PLASMA's band format
*
*******************************************************************************
*
* @param[in] it
* The row block index of the tile.
*
* @param[in] jt
* The column block index of the tile.
*
* @param[in] m
* The number of rows of the matrices A and B. M >= 0.
*
* @param[in] n
* The number of columns of the matrices A and B. N >= 0.
*
* @param[in] A
* The M-by-N matrix to copy.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,M).
*
* @param[out] B
* The M-by-N copy of the matrix A.
* On exit, B = A ONLY in the locations specified by uplo.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,M).
*
******************************************************************************/
__attribute__((weak))
// Copy tile (it, jt) of a band matrix from LAPACK band storage A into
// PLASMA tile storage B, zeroing every B entry outside the band implied
// by uplo/kl/ku.  See the doxygen block above for parameter meanings.
void plasma_core_dlacpy_lapack2tile_band(plasma_enum_t uplo,
int it, int jt,
int m, int n, int nb, int kl, int ku,
const double *A, int lda,
double *B, int ldb)
{
int i, j;
int j_start, j_end;
// Column range [j_start, j_end) that can contain band entries for this
// tile; columns outside it are zero-filled below.
if (uplo == PlasmaGeneral) {
j_start = 0; // pivot back and could fill in
j_end = (jt <= it ? n : imin(n, (it-jt)*nb+m+ku+kl+1));
}
else if (uplo == PlasmaUpper) {
j_start = 0;
j_end = imin(n, (it-jt)*nb+m+ku+1);
}
else {
j_start = imax(0, (it-jt)*nb-kl);
j_end = n;
}
// Zero the leading columns that lie entirely outside the band.
for (j = 0; j < j_start; j++) {
for (i = 0; i < m; i++) {
B[i + j*ldb] = 0.0;
}
}
for (j = j_start; j < j_end; j++) {
// Row range [i_start, i_end) of band entries within this column.
int i_start, i_end;
if (uplo == PlasmaGeneral) {
i_start = (jt <= it ? 0 : imax(0, (jt-it)*nb+j-ku-kl));
i_end = (jt >= it ? m : imin(m, (jt-it)*nb+j+kl+nb+1));
// +nb because we use dgetrf on panel and pivot back within the panel.
// so the last tile in panel could fill.
}
else if (uplo == PlasmaUpper) {
i_start = imax(0, (jt-it)*nb+j-ku);
i_end = imin(m, (jt-it)*nb+j+1);
}
else {
i_start = imax(0, (jt-it)*nb+j);
i_end = imin(m, (jt-it)*nb+j+kl+1);
}
// Zero above the band, copy the band, zero below it.
for (i = 0; i < i_start; i++) {
B[i + j*ldb] = 0.0;
}
for (i = i_start; i < i_end; i++) {
B[i + j*ldb] = A[i + j*lda];
}
for (i = i_end; i < m; i++) {
B[i + j*ldb] = 0.0;
}
}
// Zero the trailing columns past the band.
for (j = j_end; j < n; j++) {
for (i = 0; i < m; i++) {
B[i + j*ldb] = 0.0;
}
}
}
/******************************************************************************/
// OpenMP task wrapper: schedules plasma_core_dlacpy_lapack2tile_band as a
// task that reads A[0:lda*n] and writes B[0:ldb*n], so the runtime can
// order it against other tasks touching the same tiles.
void plasma_core_omp_dlacpy_lapack2tile_band(plasma_enum_t uplo,
int it, int jt,
int m, int n, int nb, int kl, int ku,
const double *A, int lda,
double *B, int ldb)
{
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:B[0:ldb*n])
plasma_core_dlacpy_lapack2tile_band(uplo,
it, jt, m, n, nb, kl, ku,
A, lda,
B, ldb);
}
/*******************************************************************************
*
* @ingroup core_double
*
* plasma_core_dlacpy copies all or part of a two-dimensional matrix A to another
* matrix B
*
*******************************************************************************
*
* @param[in] it
* The row block index of the tile.
*
* @param[in] jt
* The column block index of the tile.
*
* @param[in] m
* The number of rows of the matrices A and B. m >= 0.
*
* @param[in] n
* The number of columns of the matrices A and B. n >= 0.
*
* @param[in] A
* The m-by-n matrix to copy.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1, m).
*
* @param[out] B
* The m-by-n copy of the matrix A.
* On exit, B = A ONLY in the locations specified by uplo.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1, m).
*
******************************************************************************/
__attribute__((weak))
// Copy tile (it, jt) from PLASMA tile storage B back into LAPACK band
// storage A.  Only band entries are written; unlike the lapack2tile
// direction, out-of-band positions in A are left untouched.
void plasma_core_dlacpy_tile2lapack_band(plasma_enum_t uplo,
int it, int jt,
int m, int n, int nb, int kl, int ku,
const double *B, int ldb,
double *A, int lda)
{
int i, j;
int j_start, j_end;
// Column range [j_start, j_end) that can contain band entries for this
// tile (same index algebra as the lapack2tile direction).
if (uplo == PlasmaGeneral) {
j_start = 0; // pivot back and could fill in
j_end = (jt <= it ? n : imin(n, (it-jt)*nb+m+ku+kl+1));
}
else if (uplo == PlasmaUpper) {
j_start = 0;
j_end = imin(n, (it-jt)*nb+m+ku+1);
}
else {
j_start = imax(0, (it-jt)*nb-kl);
j_end = n;
}
for (j = j_start; j < j_end; j++) {
// Row range [i_start, i_end) of band entries within this column.
int i_start, i_end;
if (uplo == PlasmaGeneral) {
i_start = (jt <= it ? 0 : imax(0, (jt-it)*nb+j-ku-kl));
i_end = (jt >= it ? m : imin(m, (jt-it)*nb+j+kl+nb+1));
// +nb because we use dgetrf on panel and pivot back within the panel.
// so the last tile in panel could fill.
}
else if (uplo == PlasmaUpper) {
i_start = imax(0, (jt-it)*nb+j-ku);
i_end = imin(m, (jt-it)*nb+j+1);
}
else {
i_start = imax(0, (jt-it)*nb+j);
i_end = imin(m, (jt-it)*nb+j+kl+1);
}
for (i = i_start; i < i_end; i++) {
A[i + j*lda] = B[i + j*ldb];
}
}
}
/******************************************************************************/
// OpenMP task wrapper: schedules plasma_core_dlacpy_tile2lapack_band as a
// task that reads B[0:ldb*n] and writes A[0:lda*n].
void plasma_core_omp_dlacpy_tile2lapack_band(plasma_enum_t uplo,
int it, int jt,
int m, int n, int nb, int kl, int ku,
const double *B, int ldb,
double *A, int lda)
{
#pragma omp task depend(in:B[0:ldb*n]) \
depend(out:A[0:lda*n])
plasma_core_dlacpy_tile2lapack_band(uplo,
it, jt, m, n, nb, kl, ku,
B, ldb,
A, lda);
}
|
fig4.68-master.c | /*
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
Copyright 2009 Sun Microsystems, Inc. All rights reserved.
The contents of this file are subject to the terms of the BSD License("BSD")(the "License").
You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt
The BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistribution of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistribution in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Sun Microsystems, Inc. or the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
This software is provided "AS IS," without a warranty of any kind. ALL
EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND
ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A
RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT
OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
You acknowledge that this software is not designed, licensed or intended for
use in the design, construction, operation or maintenance of any nuclear facility.
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif
/*
 * Demonstrates the OpenMP master construct: one thread initializes the
 * shared variable `a`, a barrier publishes the write, then every thread
 * copies it into `b` through a worksharing loop.
 */
int main()
{
    int n = 9;
    int idx;
    int a;
    int b[n];
#ifdef _OPENMP
    /* Pin the team size at 4 and disable dynamic adjustment. */
    (void) omp_set_dynamic(FALSE);
    if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
    (void) omp_set_num_threads(4);
#endif
    /* Sentinel values so an un-updated slot would be visible as -1. */
    for (idx = 0; idx < n; ++idx) {
        b[idx] = -1;
    }
#pragma omp parallel shared(a,b) private(idx)
    {
#pragma omp master
        {
            a = 10;
            printf("Master construct is executed by thread %d\n",
                   omp_get_thread_num());
        }
        /* The barrier makes the master's write to `a` visible to all
           threads before they read it below. */
#pragma omp barrier
#pragma omp for
        for (idx = 0; idx < n; ++idx) {
            b[idx] = a;
        }
    } /*-- End of parallel region --*/
    printf("After the parallel region:\n");
    for (idx = 0; idx < n; ++idx) {
        printf("b[%d] = %d\n", idx, b[idx]);
    }
    return(0);
}
|
GB_unop__identity_bool_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:
// op(A)  function:  GB (_unop_apply__identity_bool_fp64)
// op(A') function:  GB (_unop_tran__identity_bool_fp64)
// C type:   bool
// A type:   double
// cast:     bool cij = (aij != 0)
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    double

// type of the C matrix entries
#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// address/reference of the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator (identity: pass the value through unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting: double -> bool is "nonzero" (any nonzero value becomes true)
#define GB_CAST(z, aij) \
    bool z = (aij != 0) ;

// cij = op (aij): fused load, cast, apply, and store for one entry
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (aij != 0) ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (false here: double -> bool requires a cast, so memcpy cannot be used)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FP64)
// Cx = op (cast (Ax)): apply the identity operator with a double -> bool
// cast to every entry of A, writing the result into Cx.
GrB_Info GB (_unop_apply__identity_bool_fp64)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap size)
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every slot of Ax is a live entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            bool z = (aij != 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // only slots with Ab [p] set contain live entries
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            bool z = (aij != 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The entire body is supplied by the shared template GB_unop_transpose.c,
// specialized via the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_bool_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces
    const int64_t *restrict A_slice,    // how A is partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
explicit_dt.h | /*
==============================================================================
KratosPFEMApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: anonymous $
// Date: $Date: 2008-11-19 15:38:01 $
// Revision: $Revision: 1.1 $
//
//
#if !defined(KRATOS_EXPLICIT_DT_INCLUDED)
#define KRATOS_EXPLICIT_DT_INCLUDED
#include <string>
#include <iostream>
#include <algorithm>
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "utilities/geometry_utilities.h"
#include "geometries/triangle_2d_3.h"
#include "utilities/openmp_utils.h"
//#include "kratos/applications/MeshingApplication/meshing_application.h"
namespace Kratos
{
/// Process that computes a stable time step for explicit time integration:
/// each element proposes a critical dt (via Element::Calculate(DELTA_TIME)),
/// the global minimum is taken, clamped to [Min_dt, Max_dt], scaled by the
/// CFL factor, and stored in the ProcessInfo under DELTA_TIME.
class ExplicitDtProcess
    : public Process
{
public:
    /// @param CFL            safety factor applied to the computed step
    /// @param min_dt         lower clamp for the time step
    /// @param max_dt         upper clamp (and starting value) for the step
    /// @param ThisModelPart  model part whose elements are queried
    ExplicitDtProcess(double CFL,double min_dt,double max_dt, ModelPart& ThisModelPart )
        :Process(), cfl(CFL),Min_dt(min_dt),Max_dt(max_dt), mr_model_part(ThisModelPart)
    {
    }

    /// Destructor.
    virtual ~ExplicitDtProcess()
    {
    }

    ///@}
    ///@name Operators
    ///@{

    /// Allows the process to be used as a callable.
    void operator()()
    {
        Execute();
    }

    /// Compute and store the explicit time step (see class comment).
    virtual void Execute()
    {
        KRATOS_TRY

        // One accumulator slot per thread to avoid a shared reduction.
        // NOTE(review): the 10.0 seed also acts as an implicit upper bound —
        // if every element proposes dt > 10 and Max_dt > 10, DT is capped
        // at 10 rather than Max_dt; confirm this is intended.
        int NumThreads = OpenMPUtils::GetNumThreads();
        std::vector< double > Threads_dt(NumThreads,10.0);

        ModelPart::ElementsContainerType::iterator elem_bg = mr_model_part.ElementsBegin();
        int n_elems = mr_model_part.Elements().size();

        #pragma omp parallel for firstprivate(n_elems, elem_bg)
        for(int ii=0; ii<n_elems; ++ii)
        {
            //calculate min_dt
            ModelPart::ElementsContainerType::iterator elem = elem_bg + ii;
            double calc_dt = 1.0;
            // Element fills calc_dt with its critical time step.
            elem->Calculate(DELTA_TIME, calc_dt, mr_model_part.GetProcessInfo());

            // Keep the smallest step seen by this thread.
            int k = OpenMPUtils::ThisThread();
            if(calc_dt < Threads_dt[k])
                Threads_dt[k] = calc_dt;
        }
        // NOTE(review): this barrier sits outside the parallel region, where
        // it binds to the implicit (sequential) region and is a no-op; the
        // parallel-for above already ends with an implicit barrier.
        #pragma omp barrier

        //KRATOS_WATCH(omp_get_thread_num());
        KRATOS_WATCH(NumThreads);

        // Sequential reduction over the per-thread minima.
        double DT = Max_dt;
        for(int kk=0; kk<NumThreads; ++kk)
            if( Threads_dt[kk] < DT)
                DT = Threads_dt[kk];

        // Clamp from below, then apply the CFL safety factor.
        // NOTE(review): cfl is applied AFTER the Min_dt clamp, so the final
        // step can drop below Min_dt when cfl < 1 — confirm this ordering.
        if(DT < Min_dt) DT = Min_dt;
        // double DT = 0.00000001;
        DT*=cfl;

        mr_model_part.GetProcessInfo()[DELTA_TIME] = DT;
        KRATOS_WATCH("ExplicitDeltaT");
        KRATOS_WATCH(DT);
        // return DT;
        KRATOS_WATCH("++++++++++++++++++++END OF ExplicitDtProcess PROCESS ^^^^^^^^^^^^^^^^^^^^^^");
        KRATOS_CATCH("")
    }

private:
    double cfl,Min_dt,Max_dt;   // CFL factor and time-step bounds
    ModelPart& mr_model_part;   // model part being processed
};
}//namespace kratos
#endif
|
shared-clauseModificado.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Demo of the shared() clause: `a` and `n` are explicitly shared under
 * default(none); the loop variable `i` is predetermined private by the
 * parallel-for construct. Each iteration adds its index to a[i], so the
 * expected output is a[i] == 2*i + 1.
 *
 * Fixes: `main()` relied on implicit int (invalid since C99) and fell off
 * the end without returning a value.
 */
int main(void)
{
    int i, n = 7;
    int a[n];

    for (i = 0; i < n; i++)
        a[i] = i + 1;

#pragma omp parallel for shared(a,n) default(none)
    for (i = 0; i < n; i++) a[i] += i;

    printf("Después de parallel for:\n");
    for (i = 0; i < n; i++)
        printf("a[%d] = %d\n", i, a[i]);

    return 0;
}
|
LAGraph_cc_fastsv2.c | /*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
/**
* Code is based on the algorithm described in the following paper
* Zhang, Azad, Hu. FastSV: FastSV: A Distributed-Memory Connected Component
* Algorithm with Fast Convergence (SIAM PP20)
*
* Modified by Tim Davis, Texas A&M University
**/
// The input matrix A must be symmetric. Self-edges (diagonal entries) are
// OK, and are ignored. The values and type of A are ignored; just its
// pattern is accessed.
#define LAGRAPH_EXPERIMENTAL_ASK_BEFORE_BENCHMARKING
#include "LAGraph.h"
// atomic_min_uint64: atomically replace (*p) with min(*p, value).
//
// Fixes relative to the original:
//  * the local variable `new` is a C++ keyword, which broke any build that
//    pulled this file into a C++ translation unit; it is no longer needed.
//  * when the stored value is already <= value, the original still issued a
//    compare-and-swap that wrote the same value back; we now return early,
//    which also removes the dependency on the LAGRAPH_MIN macro.
static inline void atomic_min_uint64
(
    uint64_t *p,        // input/output: location holding the running minimum
    uint64_t value      // input: candidate minimum
)
{
    uint64_t old ;
    do
    {
        // atomically snapshot the current value at (*p)
        #pragma omp atomic read
        old = (*p) ;
        // nothing to do if the stored value is already the minimum
        if (old <= value) return ;
    }
    while (!__sync_bool_compare_and_swap (p, old, value)) ;
}
// no workspace allocated yet at this point, so error-exit frees nothing
#define LAGRAPH_FREE_ALL

//------------------------------------------------------------------------------
// Reduce_assign:      w (index) += src
//------------------------------------------------------------------------------

// mask = NULL, accumulator = GrB_MIN_UINT64, descriptor = NULL
// Duplicates are summed with the accumulator, which differs from how
// GrB_assign works.

// Implementation: both vectors are dumped to the caller-provided scratch
// array `mem` (first ns slots for src, next nw for w), the scatter-min is
// done on the raw arrays, and w is rebuilt from the result.
// NOTE(review): `nthreads` is currently unused — the parallel scatter path
// below is compiled out with #if 0.
static GrB_Info Reduce_assign
(
    GrB_Vector w,       // vector of size n, all entries present
    GrB_Vector src,     // vector of size n, all entries present
    GrB_Index *index,   // array of size n
    GrB_Index n,
    GrB_Index *I,       // size n, containing [0, 1, 2, ..., n-1]
    GrB_Index *mem,     // scratch, size 2*n
    int nthreads
)
{
    GrB_Index nw, ns;
    LAGr_Vector_nvals(&nw, w);
    LAGr_Vector_nvals(&ns, src);
    GrB_Index *sval = mem, *wval = sval + nw;
    // row indices are not needed (NULL): entries come out in index order
    LAGr_Vector_extractTuples(NULL, wval, &nw, w);
    LAGr_Vector_extractTuples(NULL, sval, &ns, src);
#if 0
    if (nthreads >= 4)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (GrB_Index i = 0; i < n; i++)
        {
            atomic_min_uint64 (&(wval [index [i]]), sval [i]) ;
            // if (sval[i] < wval[index[i]])
            //     wval[index[i]] = sval[i];
        }
    }
    else
#endif
    {
        // sequential scatter: wval[index[i]] = min(wval[index[i]], sval[i])
        for (GrB_Index i = 0; i < n; i++)
        {
            if (sval[i] < wval[index[i]])
                wval[index[i]] = sval[i];
        }
    }
    // rebuild w from the updated dense array (I = 0..n-1, no duplicates,
    // so the GrB_PLUS_UINT64 dup operator is never actually applied)
    LAGr_Vector_clear(w);
    LAGr_Vector_build(w, I, wval, nw, GrB_PLUS_UINT64);
    return GrB_SUCCESS;
}
#undef LAGRAPH_FREE_ALL
// free all workspace of LAGraph_cc_fastsv2, on both error and normal exit;
// S is freed only when it was allocated here (sanitize), not when it
// aliases the caller's matrix A
#define LAGRAPH_FREE_ALL \
{ \
    LAGRAPH_FREE (I); \
    LAGRAPH_FREE (V); \
    LAGRAPH_FREE (mem); \
    LAGr_free (&f) ; \
    LAGr_free (&gp); \
    LAGr_free (&mngp); \
    LAGr_free (&gp_new); \
    LAGr_free (&mod); \
    if (sanitize) LAGr_free (&S); \
}
//------------------------------------------------------------------------------
// LAGraph_cc_fastsv2
//------------------------------------------------------------------------------
// LAGraph_cc_fastsv2: connected components of an undirected graph via the
// FastSV algorithm (hooking + shortcutting on a parent vector f).
// On success, *result is a vector of size n where result[i] is the id
// (smallest vertex index) of the component containing vertex i.
GrB_Info LAGraph_cc_fastsv2
(
    GrB_Vector *result,     // output: array of component identifiers
    GrB_Matrix A,           // input matrix
    bool sanitize           // if true, ensure A is symmetric
)
{
    GrB_Info info;
    GrB_Index n, *mem = NULL, *I = NULL, *V = NULL ;
    GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL ;
    GrB_Matrix S = NULL ;
    LAGr_Matrix_nrows (&n, A) ;
    if (sanitize)
    {
        // S = A | A'
        LAGr_Matrix_new (&S, GrB_BOOL, n, n) ;
        LAGr_eWiseAdd (S, NULL, NULL, GrB_LOR, A, A, LAGraph_desc_otoo) ;
    }
    else
    {
        // Use the input as-is, and assume it is symmetric
        S = A ;
    }
    // determine # of threads to use for Reduce_assign
    // (one thread per ~1M vertices, at least 1, at most nthreads_max)
    int nthreads_max = LAGraph_get_nthreads ( ) ;
    int nthreads = n / (1024*1024) ;
    nthreads = LAGRAPH_MIN (nthreads, nthreads_max) ;
    nthreads = LAGRAPH_MAX (nthreads, 1) ;
    // vectors: f = parent, gp = grandparent, mngp = min neighbor grandparent
    LAGr_Vector_new(&f, GrB_UINT64, n);
    LAGr_Vector_new(&gp_new, GrB_UINT64, n);
    LAGr_Vector_new(&mod, GrB_BOOL, n);
    // temporary arrays
    // NOTE(review): the LAGraph_malloc results are not NULL-checked here;
    // a failed allocation would be dereferenced below.
    I = LAGraph_malloc (n, sizeof(GrB_Index));
    V = LAGraph_malloc (n, sizeof(uint64_t)) ;
    mem = (GrB_Index*) LAGraph_malloc (2*n, sizeof(GrB_Index)) ;
    // prepare vectors: every vertex starts as its own parent (f[i] = i)
    for (GrB_Index i = 0; i < n; i++)
        I[i] = V[i] = i;
    LAGr_Vector_build (f, I, V, n, GrB_PLUS_UINT64);
    LAGr_Vector_dup (&gp, f);
    LAGr_Vector_dup (&mngp,f);
    // main computation: iterate until no grandparent changes
    bool diff = true ;
    while (diff)
    {
        // hooking & shortcutting
        // mngp = min over neighbors of the neighbor's grandparent
        LAGr_mxv (mngp, 0, GrB_MIN_UINT64, GxB_MIN_SECOND_UINT64, S, gp, 0);
        // f[V[i]] = min(f[V[i]], mngp[i]) — scatter-min through parents
        LAGRAPH_OK (Reduce_assign (f, mngp, V, n, I, mem, nthreads));
        LAGr_eWiseMult (f, 0, 0, GrB_MIN_UINT64, f, mngp, 0);
        LAGr_eWiseMult (f, 0, 0, GrB_MIN_UINT64, f, gp, 0);
        // calculate grandparent: gp_new[i] = f[f[i]]
        LAGr_Vector_extractTuples (NULL, V, &n, f);
        LAGr_extract (gp_new, 0, 0, f, V, n, 0);
        // check termination: any entry where gp_new differs from gp?
        LAGr_eWiseMult (mod, 0, 0, GrB_NE_UINT64, gp_new, gp, 0);
        LAGr_reduce (&diff, 0, GxB_LOR_BOOL_MONOID, mod, 0);
        // swap gp and gp_new
        GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
    }
    // free workspace and return result (ownership of f passes to caller)
    *result = f;
    f = NULL ;
    LAGRAPH_FREE_ALL ;
    return GrB_SUCCESS;
}
|
concat_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: jjzeng@openailab.com
*/
#include <math.h>
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "concat_param.h"
/* Geometry and quantization info for one tensor (at most 4 dimensions). */
struct shape_dim
{
    int dim[4];     /* tensor dimensions; entries beyond the rank are unused */
    float scale;    /* quantization scale (uint8 path only) */
    int zero;       /* quantization zero point (uint8 path only) */
};

/* Parameters gathered for one concat node. */
struct concat_op_param
{
    struct shape_dim* input_shape;  /* per-input shapes, input_counts entries */
    int input_counts;               /* number of input tensors */
    int input_dim;
    struct shape_dim output_shape;  /* output tensor shape */
    int output_dim;                 /* number of valid dims in output_shape */
    int axis;                       /* concatenation axis */
    float out_scale;
    void** input_data;              /* per-input data pointers */
};

/*
 * Concatenate the fp32 input tensors along param->axis into out_data.
 *
 * The tensors are viewed as [outer, axis, inner]: for each outer slice the
 * axis*inner contiguous chunk of every input is copied in input order.
 *
 * Returns 0 on success, -1 if the input sizes along the axis do not sum to
 * the output size (a message is printed to stderr; fixed the "concant" typo
 * in that message).
 *
 * num_thread is currently unused: the per-input memcpy loop is cheap and
 * the OpenMP pragma that once used it is commented out below.
 */
static int ref_concat_fp32(const float** in_data, float* out_data, const struct concat_op_param* param, int num_thread)
{
    int axis = param->axis;

    /* the sizes along the concat axis must add up to the output size */
    int concat_dim = 0;
    for (int ii = 0; ii < param->input_counts; ++ii)
    {
        concat_dim += param->input_shape[ii].dim[axis];
    }

    if (concat_dim != param->output_shape.dim[axis])
    {
        fprintf(stderr, "concat dimensions[%d] is not same output[%d]\n", concat_dim, param->output_shape.dim[axis]);
        return -1;
    }

    /* outer = product of dims before the axis, inner = product after it */
    int out_size, in_size;
    out_size = 1;
    for (int ii = 0; ii < axis; ++ii)
    {
        out_size *= param->output_shape.dim[ii];
    }

    in_size = 1;
    for (int ii = axis + 1; ii < param->output_dim; ++ii)
    {
        in_size *= param->input_shape[0].dim[ii];
    }

    float* output_ptr = out_data;
    for (int k = 0; k < out_size; ++k)
    {
        // #pragma omp parallel for num_threads(num_thread)
        for (int j = 0; j < param->input_counts; ++j)
        {
            /* contiguous chunk contributed by input j for this outer slice */
            int cp_size = param->input_shape[j].dim[axis] * in_size;
            memcpy(output_ptr, in_data[j] + k * cp_size, cp_size * sizeof(float));
            output_ptr += cp_size;
        }
    }

    return 0;
}
/*
 * Concatenate the uint8 (quantized) input tensors along param->axis into
 * out_data, requantizing each input to the output scale/zero-point when
 * they differ; inputs already in the output quantization are memcpy'd.
 *
 * Returns 0 on success, -1 on an axis-size mismatch (message on stderr).
 *
 * Changes: removed the dead `output_size` computation (its result was
 * never used). NOTE(review): the requantized value round(...) + out_zero
 * is stored without clamping to [0,255]; out-of-range values wrap —
 * confirm whether saturation is expected here.
 */
static int ref_concat_uint8(const uint8_t** in_data, uint8_t* out_data, const struct concat_op_param* param,
                            int num_thread)
{
    int axis = param->axis;

    /* the sizes along the concat axis must add up to the output size */
    int concat_dim = 0;
    for (int ii = 0; ii < param->input_counts; ++ii)
    {
        concat_dim += param->input_shape[ii].dim[axis];
    }

    if (concat_dim != param->output_shape.dim[axis])
    {
        fprintf(stderr, "concat dimensions is not same output: ( %d -- %d )\n", concat_dim, param->output_shape.dim[axis]);
        return -1;
    }

    /* outer = product of dims before the axis, inner = product after it */
    int outer_size, in_size;
    outer_size = 1;
    for (int ii = 0; ii < axis; ++ii)
    {
        outer_size *= param->output_shape.dim[ii];
    }

    in_size = 1;
    for (int ii = axis + 1; ii < param->output_dim; ++ii)
    {
        in_size *= param->output_shape.dim[ii];
    }

    uint8_t* output_ptr = out_data;
    float out_scale = param->output_shape.scale;
    uint8_t out_zero = param->output_shape.zero;

    for (int k = 0; k < outer_size; ++k)
    {
        for (int j = 0; j < param->input_counts; ++j)
        {
            int cp_size = param->input_shape[j].dim[axis] * in_size;
            float scale = param->input_shape[j].scale;
            uint8_t input_zero = param->input_shape[j].zero;
            const uint8_t* input_ptr = ( const uint8_t* )(in_data[j] + k * cp_size);

            if (scale == out_scale && input_zero == out_zero)
            {
                /* same quantization: raw copy */
                memcpy(output_ptr, input_ptr, cp_size);
            }
            else
            {
                /* requantize: dequantize with the input params, quantize
                   with the output params, folded into a single ratio */
                float t_scale = scale / out_scale;
                for (int ii = 0; ii < cp_size; ++ii)
                {
                    output_ptr[ii] = round((input_ptr[ii] - input_zero) * t_scale) + out_zero;
                }
            }
            output_ptr += cp_size;
        }
    }

    return 0;
}
/*
 * Allocate and default-initialize the per-node parameter block.
 * Returns 0 on success, -1 if the allocation fails (the original stored
 * and later dereferenced a NULL pointer on OOM).
 */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct concat_op_param* concat_op_param = ( struct concat_op_param* )sys_malloc(sizeof(struct concat_op_param));
    if (concat_op_param == NULL)
        return -1;

    concat_op_param->axis = 0;
    concat_op_param->input_counts = 1;
    concat_op_param->input_dim = 1;
    concat_op_param->input_shape = NULL;  /* allocated in prerun */
    concat_op_param->input_data = NULL;   /* allocated in prerun (was left uninitialized) */
    concat_op_param->out_scale = 0.1f;
    concat_op_param->output_dim = 1;
    exec_node->ops_priv = concat_op_param;
    return 0;
}
/* Dispose of the per-node parameter block created by init_node(). */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct concat_op_param* priv = ( struct concat_op_param* )exec_node->ops_priv;
    sys_free(priv);
    return 0;
}
/*
 * Capture the node configuration (axis, input count, output shape and
 * quantization) into the parameter block before execution.
 *
 * Fix: scale/zero_point are per-tensor values — the original reassigned
 * them on every iteration of the dim loop (and never set them when
 * dim_num == 0); they are now set exactly once.
 */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* output_tensor;

    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv;
    struct concat_param* concat_param = ( struct concat_param* )ir_node->op.param_mem;

    concat_op_param->axis = concat_param->axis;
    concat_op_param->input_counts = ir_node->input_num;
    concat_op_param->input_shape = ( struct shape_dim* )sys_malloc(sizeof(struct shape_dim) * ir_node->input_num);
    concat_op_param->output_dim = output_tensor->dim_num;

    /* NOTE(review): shape_dim.dim holds at most 4 entries — assumes
       dim_num <= 4; a higher-rank tensor would overflow. TODO confirm. */
    for (int ii = 0; ii < output_tensor->dim_num; ii++)
        concat_op_param->output_shape.dim[ii] = output_tensor->dims[ii];

    /* per-tensor quantization parameters: set once, outside the dim loop */
    concat_op_param->output_shape.scale = output_tensor->scale;
    concat_op_param->output_shape.zero = output_tensor->zero_point;

    concat_op_param->input_data = ( void** )sys_malloc(sizeof(void*) * ir_node->input_num);
    return 0;
}
/*
 * Execute the concat: refresh the per-input shape/quantization/data
 * pointers, then dispatch to the fp32 or uint8 reference kernel.
 *
 * Fixes: the kernels' return codes were silently discarded (a shape
 * mismatch reported by ref_concat_* still returned success), and a node
 * with zero inputs dereferenced an uninitialized `input_tensor`.
 */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor = NULL;
    struct ir_tensor* output_tensor;

    /* a concat node needs at least one input */
    if (ir_node->input_num < 1)
        return -1;

    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv;
    void* out_data = output_tensor->data;

    for (int i = 0; i < ir_node->input_num; i++)
    {
        input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[i]);
        int number = input_tensor->dim_num;
        for (int j = 0; j < number; j++)
            concat_op_param->input_shape[i].dim[j] = input_tensor->dims[j];
        /* per-tensor quantization params (hoisted out of the dim loop) */
        concat_op_param->input_shape[i].scale = input_tensor->scale;
        concat_op_param->input_shape[i].zero = input_tensor->zero_point;
        concat_op_param->input_data[i] = input_tensor->data;
    }

    /* NOTE(review): dtype is taken from the LAST input — assumes all
       inputs share the same data type; verify against graph validation. */
    if (input_tensor->data_type == TENGINE_DT_FP32)
        return ref_concat_fp32(( const float** )concat_op_param->input_data, out_data, concat_op_param,
                               exec_graph->num_thread);

    return ref_concat_uint8(( const uint8_t** )concat_op_param->input_data, out_data, concat_op_param,
                            exec_graph->num_thread);
}
/* Release the per-input buffers allocated during prerun(). */
static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct concat_op_param* priv = ( struct concat_op_param* )exec_node->ops_priv;
    sys_free(priv->input_shape);
    sys_free(priv->input_data);
    return 0;
}
// Node-ops selection score: the reference implementation claims the
// highest priority for OP_CONCAT on this device.
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_BEST;
}
/* vtable binding the concat reference kernels into the executor */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = postrun,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

/* register this ops table for OP_CONCAT at module load */
static int reg_concat_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_CONCAT, &hcl_node_ops);
}

/* remove the registration at module unload */
static int unreg_concat_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_CONCAT, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_concat_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_concat_hcl_ops);
|
inference.c | #include <stdlib.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <sys/time.h>
#include <assert.h>
#include <string.h>
/* Optionally include OpenMP with the -fopenmp flag */
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "include/lbfgs.h"
#include "include/twister.h"
#include "include/plm.h"
#include "include/inference.h"
/* Internal prototypes */
numeric_t ElapsedTime(struct timeval *start);
/* Numerical bounds for ZeroAPCPriors */
#define LAMBDA_J_MIN 1E-2
#define LAMBDA_J_MAX 1E4
#define REGULARIZATION_GROUP_EPS 0.001
/* Internal to InferPairModel:
MAP estimation of parameters by L-BFGS */
void EstimatePairModelMAP(numeric_t *x, numeric_t *lambdas, alignment_t *ali,
options_t *options);
/* Internal to EstimatePairModelMAP:
Stochastic optimization with SGD */
typedef numeric_t (*gradfun_t) (void *data, const numeric_t *x, numeric_t *g,
const int n);
void SGDOptimize(gradfun_t grad, void *data, numeric_t *x, const int n,
const int maxIter, const numeric_t crit);
numeric_t SGDWrapperPLM(void *data, const numeric_t *x, numeric_t *g, const int n);
/* Internal to EstimatePairModelMAP:
Objective functions for point parameter estimates (MAP) */
static lbfgsfloatval_t PLMNegLogPosterior(void *instance,
const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
const lbfgsfloatval_t step);
static lbfgsfloatval_t PLMNegLogPosteriorGapReduce(void *instance,
const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
const lbfgsfloatval_t step);
static lbfgsfloatval_t PLMNegLogPosteriorBlock(void *instance,
const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
const lbfgsfloatval_t step);
static lbfgsfloatval_t PLMNegLogPosteriorDO(void *instance,
const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
const lbfgsfloatval_t step);
/* Internal to EstimatePairModelMAP: progress reporting */
static int ReportProgresslBFGS(void *instance, const lbfgsfloatval_t *x,
const lbfgsfloatval_t *g, const lbfgsfloatval_t fx,
const lbfgsfloatval_t xnorm, const lbfgsfloatval_t gnorm,
const lbfgsfloatval_t step, int n, int k, int ls);
/* Internal to EstimatePairModelMAP: parameter processing */
void PreCondition(const lbfgsfloatval_t *x, lbfgsfloatval_t *g,
alignment_t *ali, options_t *options);
lbfgsfloatval_t PostCondition(const lbfgsfloatval_t *x, lbfgsfloatval_t *g, lbfgsfloatval_t fx,
alignment_t *ali, options_t *options);
void ZeroAPCPriors(alignment_t *ali, options_t *options, numeric_t *lambdas,
lbfgsfloatval_t *x);
/* Internal to EstimatePairModelMAP: utility functions to L-BFGS */
const char *LBFGSErrorString(int ret);
numeric_t *InferPairModel(alignment_t *ali, options_t *options) {
    /* Estimate the parameters of a maximum entropy model for a
       multiple sequence alignment.

       Returns a freshly malloc'd array of ali->nParams parameters
       (sitewise fields followed by couplings); the caller owns it.

       Fixes: the `lambdas` allocation was neither checked (x was — an
       inconsistency) nor freed, leaking nSites + nSites*(nSites-1)/2
       numeric_t per call. */

    /* Initialize the regularization parameters (one per field + coupling) */
    numeric_t *lambdas =
        (numeric_t *) malloc((ali->nSites + ali->nSites * (ali->nSites - 1) / 2)
            * sizeof(numeric_t));
    if (lambdas == NULL) {
        fprintf(stderr,
            "ERROR: Failed to allocate a memory block for variables.\n");
        exit(1);
    }
    for (int i = 0; i < ali->nSites; i++) lambdaHi(i) = options->lambdaH;
    for (int i = 0; i < ali->nSites - 1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            lambdaEij(i, j) = options->lambdaE;

    /* For gap-reduced problems, eliminate the gaps and reduce the alphabet */
    if (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE) {
        ali->nCodes = strlen(ali->alphabet) - 1;
        for (int i = 0; i < ali->nSites; i++)
            for (int s = 0; s < ali->nSeqs; s++)
                seq(s, i) -= 1;
    }

    /* Initialize parameters */
    ali->nParams = ali->nSites * ali->nCodes
        + ali->nSites * (ali->nSites - 1) / 2 * ali->nCodes * ali->nCodes;
    numeric_t *x = (numeric_t *) malloc(sizeof(numeric_t) * ali->nParams);
    if (x == NULL) {
        fprintf(stderr,
            "ERROR: Failed to allocate a memory block for variables.\n");
        exit(1);
    }
    for (int i = 0; i < ali->nParams; i++) x[i] = 0.0;

    /* Initialize site parameters with the ML estimates
            hi = log(fi) + C
       A single pseudocount is added for stability
       (Laplace's rule or Morcos et al. with lambda = nCodes) */
    if (options->zeroAPC != 1) {
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                xHi(i, ai) = log(fi(i, ai) * ali->nEff + 1.0);

        /* Zero-sum gauge: shift each site's fields to sum to zero */
        for (int i = 0; i < ali->nSites; i++) {
            numeric_t hSum = 0.0;
            for (int ai = 0; ai < ali->nCodes; ai++) hSum += xHi(i, ai);
            numeric_t hShift = hSum / (numeric_t) ali->nCodes;
            for (int ai = 0; ai < ali->nCodes; ai++)
                xHi(i, ai) -= hShift;
        }
    }

    switch(options->estimator) {
        /* Point estimates */
        case INFER_MAP:
            /* Maximum a posteriori estimates of model parameters */
            EstimatePairModelMAP(x, lambdas, ali, options);
            break;
        /* For: future alternative estimators */
        default:
            /* Maximum a posteriori estimates of model parameters */
            EstimatePairModelMAP(x, lambdas, ali, options);
    }

    /* Restore the alignment encoding after inference */
    if (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE) {
        for (int i = 0; i < ali->nSites; i++)
            for (int s = 0; s < ali->nSeqs; s++)
                seq(s, i) += 1;
    }

    /* Hyperparameters are only needed during estimation (fixes a leak) */
    free(lambdas);

    return (numeric_t *) x;
}
void EstimatePairModelMAP(numeric_t *x, numeric_t *lambdas, alignment_t *ali,
    options_t *options) {
    /* Computes Maximum a posteriori (MAP) estimates for the parameters of
       an undirected graphical model by L-BFGS (or minibatch SGD).

       Fixes: restored `&param` where the source had been mojibake-mangled
       to `¶m` (3 sites, would not compile); dropped the needless
       `static` on `algo` (it is assigned on every call, and function-local
       static state is not reentrant); renamed the SGD instance array so it
       no longer shadows the outer `d`. */

    /* Start timer */
    gettimeofday(&ali->start, NULL);

    /* Initialize L-BFGS */
    lbfgs_parameter_t param;
    lbfgs_parameter_init(&param);
    param.epsilon = 1E-3;
    param.max_iterations = options->maxIter; /* 0 is unbounded */

    /* Select the objective function for the chosen estimator */
    lbfgs_evaluate_t algo;
    switch(options->estimatorMAP) {
        case INFER_MAP_PLM:
            algo = PLMNegLogPosterior;
            break;
        case INFER_MAP_PLM_GAPREDUCE:
            algo = PLMNegLogPosteriorGapReduce;
            break;
        case INFER_MAP_PLM_BLOCK:
            algo = PLMNegLogPosteriorBlock;
            break;
        case INFER_MAP_PLM_DROPOUT:
            algo = PLMNegLogPosteriorDO;
            break;
        default:
            algo = PLMNegLogPosterior;
    }

    if (options->zeroAPC == 1) fprintf(stderr,
        "Estimating coupling hyperparameters le = 1/2 inverse variance\n");

    /* Problem instance in void array */
    void *d[3] = {(void *)ali, (void *)options, (void *)lambdas};
    if (options->sgd == 1) {
        /* Scale hyperparams down to the minibatch size */
        numeric_t scale = (numeric_t) options->sgdBatchSize / ali->nEff;
        options->lambdaGroup *= scale;
        for (int i = 0; i < ali->nSites; i++) lambdaHi(i) *= scale;
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++)
                lambdaEij(i, j) *= scale;

        /* SGD optimization */
        numeric_t crit = 0.01;
        void *dBatch[4] = {(void *)ali, (void *)options, (void *)lambdas, (void *) algo};
        SGDOptimize(SGDWrapperPLM, dBatch, x, ali->nParams, options->maxIter, crit);

        /* Unscale hyperparams back to the full alignment */
        numeric_t invScale = ali->nEff / (numeric_t) options->sgdBatchSize;
        options->lambdaGroup *= invScale;
        for (int i = 0; i < ali->nSites; i++) lambdaHi(i) *= invScale;
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++)
                lambdaEij(i, j) *= invScale;
    } else {
        /* L-BFGS optimization */
        int ret = 0;
        lbfgsfloatval_t fx;
        ret = lbfgs(ali->nParams, x, &fx, algo, ReportProgresslBFGS,
            (void*)d, &param);
        fprintf(stderr, "Gradient optimization: %s\n", LBFGSErrorString(ret));
    }

    /* Optionally re-estimate parameters with adjusted hyperparameters */
    if (options->zeroAPC == 1) {
        /* Form new priors on the variances */
        ZeroAPCPriors(ali, options, lambdas, x);

        /* Reinitialize coupling parameters */
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++)
                for (int ai = 0; ai < ali->nCodes; ai++)
                    for (int aj = 0; aj < ali->nCodes; aj++)
                        xEij(i, j, ai, aj) = 0.0;

        /* Iterate estimation with new hyperparameter estimates */
        options->zeroAPC = 2;
        lbfgsfloatval_t fx2;
        int ret2 = lbfgs(ali->nParams, x, &fx2, algo,
            ReportProgresslBFGS, (void*)d, &param);
        fprintf(stderr, "Gradient optimization: %s\n", LBFGSErrorString(ret2));
    }
}
void SGDOptimize(gradfun_t grad, void *data, numeric_t *x, const int n,
    const int maxIter, const numeric_t crit) {
    /* Opitimize an objective function by Stochastic Gradient Descent (Adam)
        Arguments:
            grad        gradient of objective
            data        pointer to data
            x           estimated parameters (length n)
            n           number of parameters
            eps         learning rate
            maxIter     maximum number of iterations
            crit        stop when ||grad|| / ||x|| < crit

        On exit, x holds the Polyak (exponential) average of the iterates,
        not the last iterate. NOTE(review): the malloc calls below are not
        NULL-checked.
    */
    // numeric_t ALPHA0 = 0.001;
    // numeric_t ALPHAT = 0.00001;
    // Adam moment-decay rates and divide-by-zero guard
    numeric_t BETA1 = 0.9;
    numeric_t BETA2 = 0.99;
    numeric_t EPSILON = 1E-8;

    numeric_t *g = (numeric_t *) malloc(n * sizeof(numeric_t));
    numeric_t criterion = crit + 1.0;

    /* Begin profiling */
    struct timeval start;
    gettimeofday(&start, NULL);

    /* Initialize estimates of first and second moments of the gradient */
    numeric_t *meanX = (numeric_t *) malloc(n * sizeof(numeric_t));
    numeric_t *meanG = (numeric_t *) malloc(n * sizeof(numeric_t));
    numeric_t *squareG = (numeric_t *) malloc(n * sizeof(numeric_t));
    for (int i = 0; i < n; i++) meanX[i] = 0;
    for (int i = 0; i < n; i++) meanG[i] = 0;
    for (int i = 0; i < n; i++) squareG[i] = 0;

    /* Optimization loop */
    int t = 1;
    do {
        /* Estimate the gradient (zeroed first; grad accumulates into g) */
        for (int i = 0; i < n; i++) g[i] = 0;
        numeric_t f = grad(data, x, g, n);

        /* Update estimates of moments (exponential moving averages) */
        for (int i = 0; i < n; i++)
            meanG[i] = BETA1 * meanG[i] + (1.0 - BETA1) * g[i];
        for (int i = 0; i < n; i++)
            squareG[i] = BETA2 * squareG[i] + (1.0 - BETA2) * g[i] * g[i];

        /* Update Q with Adam learning rates */
        // numeric_t schedule = ALPHA;
        // numeric_t frac = (numeric_t) t / (numeric_t) maxIter;
        // frac = floor(frac * 5) / 5.;
        // numeric_t schedule = exp((1 - frac) * log(ALPHA0) + frac * log(ALPHAT));
        // Anneal strategy #2: step decay, halving every 50 iterations
        // (t / 50 is integer division, so the rate drops in discrete steps)
        numeric_t schedule = 0.01 * pow(0.5, (t / 50));
        // numeric_t schedule = 0.01;
        // Adam bias correction for the moment estimates at step t
        numeric_t alpha = schedule
            * sqrt(1.0 - pow(BETA2, (numeric_t) t))
            / (1.0 - pow(BETA1, (numeric_t) t));
        for (int i = 0; i < n; i++)
            x[i] -= meanG[i] * alpha / (sqrt(squareG[i]) + EPSILON);

        /* Update Polyak average */
        for (int i = 0; i < n; i++)
            meanX[i] = BETA1 * meanX[i] + (1 - BETA1) * x[i];

        /* Stopping criterion: ||grad(params)|| / ||params|| */
        numeric_t paramNorm = 1E-6;
        for (int i = 0; i < n; i++) paramNorm += fabs(x[i]) / (numeric_t) n;
        numeric_t gradNorm = 1E-6;
        for (int i = 0; i < n; i++)
            gradNorm += fabs(meanG[i]) / (numeric_t) n;
        criterion = gradNorm;

        if (t == 1)
            fprintf(stderr, "iter\ttime\tobj\t|x|\t|g|\tcrit\n");
        fprintf(stderr, "%d\t%.1f\t%.1f\t%.1f\t%.1f\t%.1f\n",
            t, ElapsedTime(&start), f, paramNorm, gradNorm, criterion);
        t++;
    } while (t <= maxIter && criterion > crit);

    // for (int i = 0; i < n; i++) x[i] = meanX[i] / ((numeric_t) t - 1);
    for (int i = 0; i < n; i++) x[i] = meanX[i];
    free(meanX);
    free(meanG);
    free(squareG);
    free(g);
}
numeric_t SGDWrapperPLM(void *data, const numeric_t *x, numeric_t *g,
    const int n) {
    /* Wrap objective function for L-BFGS to support
       minibatched Stochastic Gradient Descent (SGD):
       samples sgdBatchSize sequences by weight, evaluates the wrapped
       objective on the minibatch, and rescales f and g to the full
       effective alignment size.

       Fix: when floating-point rounding left the final CDF entry slightly
       below a sampled u[i], the fallback index was `batchSize - 1` — a
       batch position, not a sequence index. It is now the last sequence,
       `ali->nSeqs - 1`, which is what the CDF tail bucket represents.

       NOTE(review): the malloc calls below are not NULL-checked. */
    void **d = (void **)data;
    alignment_t *ali = (alignment_t *) d[0];
    options_t *options = (options_t *) d[1];
    numeric_t *lambdas = (numeric_t *) d[2];
    lbfgs_evaluate_t lbfgsfun = (lbfgs_evaluate_t) d[3];

    /* Shallow copy alignment and options */
    alignment_t *aliBatch = (alignment_t *) malloc(sizeof(alignment_t));
    options_t *optionsBatch = (options_t *) malloc(sizeof(options_t));
    *aliBatch = *ali;
    *optionsBatch = *options;

    /* Build the cumulative distribution over normalized sequence weights */
    numeric_t *CDF = (numeric_t *) malloc(sizeof(numeric_t) * ali->nSeqs);
    numeric_t weightSum = 0;
    for (int i = 0; i < ali->nSeqs; i++) weightSum += ali->weights[i];
    CDF[0] = ali->weights[0] / weightSum;
    for (int i = 1; i < ali->nSeqs; i++) CDF[i] = CDF[i-1] + ali->weights[i] / weightSum;

    /* Sample a batch of sequences by inverse-CDF sampling */
    int batchSize = options->sgdBatchSize;
    int *indices = (int *) malloc(sizeof(int) * batchSize);
    numeric_t *u = (numeric_t *) malloc(sizeof(numeric_t) * batchSize);
    for (int i = 0; i < batchSize; i++) indices[i] = -1;
    for (int i = 0; i < batchSize; i++) u[i] = (numeric_t) genrand_real3();
    for (int s = 0; s < ali->nSeqs; s++)
        for (int i = 0; i < batchSize; i++)
            if (indices[i] < 0 && u[i] <= CDF[s]) indices[i] = s;
    /* fp-rounding fallback: u[i] above every CDF entry maps to the last
       sequence (was batchSize - 1, a batch index — wrong array) */
    for (int i = 0; i < batchSize; i++)
        if (indices[i] < 0) indices[i] = ali->nSeqs - 1;

    /* Clone mini-alignment with unit weights */
    aliBatch->sequences =
        (letter_t *) malloc(sizeof(letter_t) * batchSize * ali->nSites);
    aliBatch->weights =
        (numeric_t *) malloc(sizeof(numeric_t) * batchSize);
    for (int i = 0; i < batchSize; i++)
        aliBatch->weights[i] = 1.0;
    for (int i = 0; i < batchSize; i++)
        for (int j = 0; j < ali->nSites; j++)
            aliBatch->sequences[j + i * ali->nSites] = seq(indices[i], j);
    free(u);
    free(CDF);
    free(indices);
    aliBatch->nSeqs = batchSize;

    /* Run the wrapped objective on the minibatch */
    void *instance[3] = {(void *)aliBatch, (void *)optionsBatch, (void *)lambdas};
    numeric_t f = lbfgsfun(instance, x, g, n, 0);

    /* Rescale minibatch objective/gradient to the full alignment */
    numeric_t scale = weightSum / (numeric_t) batchSize;
    f *= scale;
    for (int i = 0; i < n; i++) g[i] *= scale;

    /* Clean up */
    free(aliBatch->sequences);
    free(aliBatch->weights);
    free(aliBatch);
    free(optionsBatch);
    return f;
}
static lbfgsfloatval_t PLMNegLogPosterior(void *instance,
    const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
    const lbfgsfloatval_t step) {
    /* Compute the negative log posterior, which is the negative penalized
       log-(pseudo)likelihood and the objective for MAP inference.

       instance packs {alignment_t*, options_t*, lambdas}; x holds the
       parameters (fields then couplings), g receives the gradient, n is the
       parameter count, step is the line-search step (unused here).
       Returns the objective value; also stores the unpenalized part in
       ali->negLogLk.

       NOTE(review): siteH/siteE/siteDH/siteDE/xHi/xEij/dHi/dEij/seq and
       lambdaHi/lambdaEij are macros defined elsewhere in this file,
       presumably indexing into x, g, Xi, and Di -- confirm against their
       definitions. */
    void **d = (void **)instance;
    alignment_t *ali = (alignment_t *) d[0];
    options_t *options = (options_t *) d[1];
    numeric_t *lambdas = (numeric_t *) d[2];
    /* Initialize log-likelihood and gradient */
    lbfgsfloatval_t fx = 0.0;
    for (int i = 0; i < ali->nParams; i++) g[i] = 0;
    /* Negative log-pseudolikelihood: each site i contributes the negative
       conditional log likelihood of its column given the rest of each
       sequence; sites are independent, so they run in parallel */
    #pragma omp parallel for
    for (int i = 0; i < ali->nSites; i++) {
        numeric_t *H = (numeric_t *) malloc(ali->nCodes * sizeof(numeric_t));
        numeric_t *P = (numeric_t *) malloc(ali->nCodes * sizeof(numeric_t));
        numeric_t siteFx = 0.0;
        /* Reshape site parameters and gradient into local blocks (Xi holds
           the parameter copy, Di the local gradient accumulator) */
        numeric_t *Xi = (numeric_t *) malloc(ali->nCodes * ali->nCodes
            * ali->nSites * sizeof(numeric_t));
        for (int j = 0; j < i; j++)
            for (int a = 0; a < ali->nCodes; a++)
                for (int b = 0; b < ali->nCodes; b++)
                    siteE(j, a, b) = xEij(i, j, a, b);
        for (int j = i + 1; j < ali->nSites; j++)
            for (int a = 0; a < ali->nCodes; a++)
                for (int b = 0; b < ali->nCodes; b++)
                    siteE(j, a, b) = xEij(i, j, a, b);
        for (int a = 0; a < ali->nCodes; a++) siteH(i, a) = xHi(i, a);
        numeric_t *Di = (numeric_t *) malloc(ali->nCodes * ali->nCodes
            * ali->nSites * sizeof(numeric_t));
        for (int d = 0; d < ali->nCodes * ali->nCodes * ali->nSites; d++)
            Di[d] = 0.0;
        /* Site negative conditional log likelihoods */
        for (int s = 0; s < ali->nSeqs; s++) {
            /* Compute potentials: field plus couplings to every other site */
            for (int a = 0; a < ali->nCodes; a++) H[a] = siteH(i, a);
            for (int j = 0; j < i; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    H[a] += siteE(j, a, seq(s, j));
            for (int j = i + 1; j < ali->nSites; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    H[a] += siteE(j, a, seq(s, j));
            /* Conditional distribution given sequence background; subtract
               the max potential before exponentiating for numerical
               stability (log-sum-exp trick) */
            numeric_t scale = H[0];
            for (int a = 1; a < ali->nCodes; a++)
                scale = (scale >= H[a] ? scale : H[a]);
            for (int a = 0; a < ali->nCodes; a++) P[a] = exp(H[a] - scale);
            numeric_t Z = 0;
            for (int a = 0; a < ali->nCodes; a++) Z += P[a];
            numeric_t Zinv = 1.0 / Z;
            for (int a = 0; a < ali->nCodes; a++) P[a] *= Zinv;
            /* Log-likelihood contributions are scaled by sequence weight */
            numeric_t w = ali->weights[s];
            siteFx -= w * log(P[seq(s, i)]);
            /* Field gradient: decrement by observed weight at the observed
               letter, increment by the model-expected weight w * P[a] */
            siteDH(i, seq(s, i)) -= w;
            for (int a = 0; a < ali->nCodes; a++)
                siteDH(i, a) -= -w * P[a];
            /* Couplings gradient: same observed-vs-expected structure */
            int ix = seq(s, i);
            for (int j = 0; j < i; j++)
                siteDE(j, ix, seq(s, j)) -= w;
            for (int j = i + 1; j < ali->nSites; j++)
                siteDE(j, ix, seq(s, j)) -= w;
            for (int j = 0; j < i; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    siteDE(j, a, seq(s, j)) -= -w * P[a];
            for (int j = i + 1; j < ali->nSites; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    siteDE(j, a, seq(s, j)) -= -w * P[a];
        }
        /* Contribute local loglk and gradient to the shared accumulators;
           serialized across threads by the critical section */
        #pragma omp critical
        {
            fx += siteFx;
            for (int j = 0; j < i; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    for (int b = 0; b < ali->nCodes; b++)
                        dEij(i, j, a, b) += siteDE(j, a, b);
            for (int j = i + 1; j < ali->nSites; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    for (int b = 0; b < ali->nCodes; b++)
                        dEij(i, j, a, b) += siteDE(j, a, b);
            for (int a = 0; a < ali->nCodes; a++) dHi(i, a) += siteDH(i, a);
            free(Xi);
            free(Di);
        }
        free(H);
        free(P);
    }
    ali->negLogLk = fx;
    /* Gaussian priors: quadratic (L2) penalties on fields and couplings,
       with per-site / per-pair strengths lambdaHi / lambdaEij */
    for (int i = 0; i < ali->nSites; i++)
        for (int ai = 0; ai < ali->nCodes; ai++) {
            dHi(i, ai) += lambdaHi(i) * 2.0 * xHi(i, ai);
            fx += lambdaHi(i) * xHi(i, ai) * xHi(i, ai);
        }
    for (int i = 0; i < ali->nSites-1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++) {
                    dEij(i, j, ai, aj) += lambdaEij(i, j)
                        * 2.0 * xEij(i, j, ai, aj);
                    fx += lambdaEij(i, j)
                        * xEij(i, j, ai, aj) * xEij(i, j, ai, aj);
                }
    fx = PostCondition(x, g, fx, ali, options);
    return fx;
}
static lbfgsfloatval_t PLMNegLogPosteriorGapReduce(void *instance,
    const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
    const lbfgsfloatval_t step) {
    /* Compute the negative log posterior (negative penalized
       log-pseudolikelihood), the objective for MAP inference, in the
       gap-reduced variant: any position with seq(s, j) < 0 is treated as a
       gap and contributes neither to the potentials nor to the gradient,
       and gapped target sites are skipped entirely.

       Same contract and macro dependencies as PLMNegLogPosterior. */
    void **d = (void **)instance;
    alignment_t *ali = (alignment_t *) d[0];
    options_t *options = (options_t *) d[1];
    numeric_t *lambdas = (numeric_t *) d[2];
    /* Initialize log-likelihood and gradient */
    lbfgsfloatval_t fx = 0.0;
    for (int i = 0; i < ali->nParams; i++) g[i] = 0;
    /* Negative log-pseudolikelihood, one independent site per iteration */
    #pragma omp parallel for
    for (int i = 0; i < ali->nSites; i++) {
        numeric_t *H = (numeric_t *) malloc(ali->nCodes * sizeof(numeric_t));
        numeric_t *P = (numeric_t *) malloc(ali->nCodes * sizeof(numeric_t));
        numeric_t siteFx = 0.0;
        /* Reshape site parameters and gradient into local blocks */
        numeric_t *Xi = (numeric_t *) malloc(ali->nCodes * ali->nCodes
            * ali->nSites * sizeof(numeric_t));
        for (int j = 0; j < i; j++)
            for (int a = 0; a < ali->nCodes; a++)
                for (int b = 0; b < ali->nCodes; b++)
                    siteE(j, a, b) = xEij(i, j, a, b);
        for (int j = i + 1; j < ali->nSites; j++)
            for (int a = 0; a < ali->nCodes; a++)
                for (int b = 0; b < ali->nCodes; b++)
                    siteE(j, a, b) = xEij(i, j, a, b);
        for (int a = 0; a < ali->nCodes; a++) siteH(i, a) = xHi(i, a);
        numeric_t *Di = (numeric_t *) malloc(ali->nCodes * ali->nCodes
            * ali->nSites * sizeof(numeric_t));
        for (int d = 0; d < ali->nCodes * ali->nCodes * ali->nSites; d++)
            Di[d] = 0.0;
        /* Site negative conditional log likelihoods */
        for (int s = 0; s < ali->nSeqs; s++) {
            /* Only ungapped sites are considered in the model */
            if (seq(s, i) >= 0) {
                /* Compute potentials, skipping gapped background sites */
                for (int a = 0; a < ali->nCodes; a++) H[a] = siteH(i, a);
                for (int j = 0; j < i; j++)
                    for (int a = 0; a < ali->nCodes; a++)
                        if (seq(s, j) >= 0)
                            H[a] += siteE(j, a, seq(s, j));
                for (int j = i + 1; j < ali->nSites; j++)
                    for (int a = 0; a < ali->nCodes; a++)
                        if (seq(s, j) >= 0)
                            H[a] += siteE(j, a, seq(s, j));
                /* Conditional distribution given sequence background;
                   max-shift before exp for numerical stability */
                numeric_t scale = H[0];
                for (int a = 1; a < ali->nCodes; a++)
                    scale = (scale >= H[a] ? scale : H[a]);
                for (int a = 0; a < ali->nCodes; a++) P[a] = exp(H[a] - scale);
                numeric_t Z = 0;
                for (int a = 0; a < ali->nCodes; a++) Z += P[a];
                numeric_t Zinv = 1.0 / Z;
                for (int a = 0; a < ali->nCodes; a++) P[a] *= Zinv;
                /* Log-likelihood contributions are scaled by sequence weight */
                numeric_t w = ali->weights[s];
                siteFx -= w * log(P[seq(s, i)]);
                /* Field gradient: observed count down, expected count up */
                siteDH(i, seq(s, i)) -= w;
                for (int a = 0; a < ali->nCodes; a++)
                    siteDH(i, a) -= -w * P[a];
                /* Couplings gradient, only over ungapped background sites */
                int ix = seq(s, i);
                for (int j = 0; j < i; j++)
                    if (seq(s, j) >= 0)
                        siteDE(j, ix, seq(s, j)) -= w;
                for (int j = i + 1; j < ali->nSites; j++)
                    if (seq(s, j) >= 0)
                        siteDE(j, ix, seq(s, j)) -= w;
                for (int j = 0; j < i; j++)
                    if (seq(s, j) >= 0)
                        for (int a = 0; a < ali->nCodes; a++)
                            siteDE(j, a, seq(s, j)) -= -w * P[a];
                for (int j = i + 1; j < ali->nSites; j++)
                    if (seq(s, j) >= 0)
                        for (int a = 0; a < ali->nCodes; a++)
                            siteDE(j, a, seq(s, j)) -= -w * P[a];
            }
        }
        /* Contribute local loglk and gradient to the shared accumulators */
        #pragma omp critical
        {
            fx += siteFx;
            for (int j = 0; j < i; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    for (int b = 0; b < ali->nCodes; b++)
                        dEij(i, j, a, b) += siteDE(j, a, b);
            for (int j = i + 1; j < ali->nSites; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    for (int b = 0; b < ali->nCodes; b++)
                        dEij(i, j, a, b) += siteDE(j, a, b);
            for (int a = 0; a < ali->nCodes; a++) dHi(i, a) += siteDH(i, a);
            free(Xi);
            free(Di);
        }
        free(H);
        free(P);
    }
    ali->negLogLk = fx;
    /* Gaussian priors: L2 penalties on fields and couplings */
    for (int i = 0; i < ali->nSites; i++)
        for (int ai = 0; ai < ali->nCodes; ai++) {
            dHi(i, ai) += lambdaHi(i) * 2.0 * xHi(i, ai);
            fx += lambdaHi(i) * xHi(i, ai) * xHi(i, ai);
        }
    for (int i = 0; i < ali->nSites-1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++) {
                    dEij(i, j, ai, aj) += lambdaEij(i, j)
                        * 2.0 * xEij(i, j, ai, aj);
                    fx += lambdaEij(i, j)
                        * xEij(i, j, ai, aj) * xEij(i, j, ai, aj);
                }
    fx = PostCondition(x, g, fx, ali, options);
    return fx;
}
static lbfgsfloatval_t PLMNegLogPosteriorBlock(void *instance,
    const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
    const lbfgsfloatval_t step) {
    /* Compute the negative log posterior, which is the negative penalized
       log-(pseudo)likelihood and the objective for MAP inference.

       Block-structured (sequence-major) variant: the couplings are expanded
       into a symmetric nSites x nSites block matrix so each sequence's
       potentials at every site can be accumulated with contiguous sweeps.

       NOTE(review): Hi/gHi/Eij/gEij/Hp/xHi/xEij/dHi/dEij/seq and
       lambdaHi/lambdaEij are macros defined elsewhere in this file --
       presumably Hp(i, a) indexes H[a + i * nCodes]; confirm against their
       definitions. */
    void **d = (void **)instance;
    alignment_t *ali = (alignment_t *) d[0];
    options_t *options = (options_t *) d[1];
    numeric_t *lambdas = (numeric_t *) d[2];
    /* Initialize log-likelihood and gradient */
    lbfgsfloatval_t fx = 0.0;
    for (int i = 0; i < ali->nParams; i++) g[i] = 0;
    /* Block fields hi and their gradient accumulator */
    numeric_t *hi = (numeric_t *)
        malloc(ali->nSites * ali->nCodes * sizeof(numeric_t));
    numeric_t *gHi = (numeric_t *)
        malloc(ali->nSites * ali->nCodes * sizeof(numeric_t));
    for (int i = 0; i < ali->nSites; i++)
        for (int ai = 0; ai < ali->nCodes; ai++) Hi(i, ai) = xHi(i, ai);
    for (int i = 0; i < ali->nSites * ali->nCodes; i++) gHi[i] = 0;
    /* Block couplings eij, symmetrized so both (i,j) and (j,i) are filled */
    numeric_t *eij = (numeric_t *) malloc(ali->nSites * ali->nSites
        * ali->nCodes * ali->nCodes * sizeof(numeric_t));
    numeric_t *gEij = (numeric_t *) malloc(ali->nSites * ali->nSites
        * ali->nCodes * ali->nCodes * sizeof(numeric_t));
    for (int i = 0; i < ali->nSites * ali->nSites * ali->nCodes * ali->nCodes;
        i++) eij[i] = 0.0;
    for (int i = 0; i < ali->nSites * ali->nSites * ali->nCodes * ali->nCodes;
        i++) gEij[i] = 0.0;
    for (int i = 0; i < ali->nSites - 1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++)
                    Eij(j, aj, i, ai) = Eij(i, ai, j, aj) = xEij(i, j, ai, aj);
    /* Negative log-pseudolikelihood, one sequence at a time */
    for (int s = 0; s < ali->nSeqs; s++) {
        /* Form potential for conditional log likelihoods at every site */
        numeric_t *H = (numeric_t *)
            malloc(ali->nCodes * ali->nSites * sizeof(numeric_t));
        numeric_t *Z = (numeric_t *) malloc(ali->nSites * sizeof(numeric_t));
        /* Initialize potentials with fields */
        // memcpy(H, hi, ali->nSites * ali->nCodes * sizeof(numeric_t));
        for(int jx = 0; jx < ali->nSites * ali->nCodes; jx++) H[jx] = hi[jx];
        /* Contribute coupling block due to i, ai */
        for (int i = 0; i < ali->nSites; i++) {
            const letter_t ai = seq(s, i);
            const numeric_t *jB = &(Eij(i, ai, 0, 0));
            for(int jx = 0; jx < ali->nSites * ali->nCodes; jx++)
                H[jx] += jB[jx];
        }
        /* Conditional likelihoods: exponentiate, then normalize each site's
           nCodes-block by its partition function Z[i].
           BUG FIX: the two normalization loops below previously iterated
           ai < ali->nSites, summing/dividing across other sites' entries
           (and past the per-site block); the alphabet size is nCodes. */
        for (int i = 0; i < ali->nSites * ali->nCodes; i++) H[i] = exp(H[i]);
        for (int i = 0; i < ali->nSites; i++) Z[i] = 0;
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++) Z[i] += Hp(i, ai);
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++) Hp(i, ai) /= Z[i];
        numeric_t seqFx = 0;
        for (int i = 0; i < ali->nSites; i++)
            seqFx -= ali->weights[s] * log(Hp(i, seq(s, i)));
        /* Reuse H as the (negated, weighted) expected-count contribution */
        for(int jx = 0; jx < ali->nSites * ali->nCodes; jx++)
            H[jx] *= -ali->weights[s];
        for (int i = 0; i < ali->nSites; i++)
            gHi(i, seq(s, i)) -= ali->weights[s];
        for(int jx = 0; jx < ali->nSites * ali->nCodes; jx++) gHi[jx] -= H[jx];
        /* NOTE(review): j starts at i, so the diagonal (i,i) block of gEij
           is also decremented; those entries are never read by the i<j
           reduction below, so this appears benign -- confirm intent. */
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i; j < ali->nSites; j++)
                gEij(i, seq(s, i), j, seq(s, j)) -= ali->weights[s];
        for (int i = 0; i < ali->nSites; i++) {
            const letter_t ai = seq(s, i);
            numeric_t *jgBlock = &(gEij(i, ai, 0, 0));
            for (int jx = 0; jx < ali->nSites * ali->nCodes; jx++)
                jgBlock[jx] -= H[jx];
        }
        free(H);
        free(Z);
        fx += seqFx;
    }
    /* Fold the block accumulators back into the packed gradient */
    for (int i = 0; i < ali->nSites; i++)
        for (int ai = 0; ai < ali->nCodes; ai++)
            dHi(i, ai) += gHi(i, ai);
    for (int i = 0; i < ali->nSites - 1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++)
                    dEij(i, j, ai, aj) += gEij(j, aj, i, ai) + gEij(i, ai, j, aj);
    free(hi);
    free(gHi);
    free(eij);
    free(gEij);
    ali->negLogLk = fx;
    /* Gaussian priors: L2 penalties on fields and couplings */
    for (int i = 0; i < ali->nSites; i++)
        for (int ai = 0; ai < ali->nCodes; ai++) {
            dHi(i, ai) += lambdaHi(i) * 2.0 * xHi(i, ai);
            fx += lambdaHi(i) * xHi(i, ai) * xHi(i, ai);
        }
    for (int i = 0; i < ali->nSites-1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++) {
                    dEij(i, j, ai, aj) += lambdaEij(i, j)
                        * 2.0 * xEij(i, j, ai, aj);
                    fx += lambdaEij(i, j)
                        * xEij(i, j, ai, aj) * xEij(i, j, ai, aj);
                }
    fx = PostCondition(x, g, fx, ali, options);
    return fx;
}
static lbfgsfloatval_t PLMNegLogPosteriorDO(void *instance,
    const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
    const lbfgsfloatval_t step) {
    /* Compute the negative log posterior (negative penalized
       log-pseudolikelihood), the objective for MAP inference, with
       parameter dropout: a fresh random 0/1 mask over all parameters is
       drawn for every sequence, and masked parameters are excluded from
       both the potentials and the gradient.

       NOTE(review): bitHi/bitEij are macros defined elsewhere in this
       file -- presumably they read drop_mask, which is otherwise unused by
       name here; confirm against their definitions. This variant is not
       OpenMP-parallelized, and rand() is not reseeded here, so results
       depend on the caller's RNG state. */
    void **d = (void **)instance;
    alignment_t *ali = (alignment_t *) d[0];
    options_t *options = (options_t *) d[1];
    numeric_t *lambdas = (numeric_t *) d[2];
    /* Initialize log-likelihood and gradient */
    lbfgsfloatval_t fx = 0.0;
    for (int i = 0; i < ali->nParams; i++) g[i] = 0;
    numeric_t *H = (numeric_t *) malloc(ali->nCodes * sizeof(numeric_t));
    numeric_t *P = (numeric_t *) malloc(ali->nCodes * sizeof(numeric_t));
    int *drop_mask = (int *) malloc(ali->nParams * sizeof(int));
    for (int s = 0; s < ali->nSeqs; s++) {
        /* Generate random bit mask over parameters */
        for (int p = 0; p < ali->nParams; p ++)
            drop_mask[p] = (int) rand() % 2;
        /* Pseudolikelihood objective */
        for (int i = 0; i < ali->nSites; i++) {
            /* Potentials: masked field plus masked couplings */
            for (int a = 0; a < ali->nCodes; a++) H[a] = bitHi(i, a)
                * xHi(i, a);
            for (int a = 0; a < ali->nCodes; a++)
                for (int j = 0; j < i; j++)
                    H[a] += bitEij(i, j, a, seq(s, j))
                        * xEij(i, j, a, seq(s, j));
            for (int a = 0; a < ali->nCodes; a++)
                for (int j = i + 1; j < ali->nSites; j++)
                    H[a] += bitEij(i, j, a, seq(s, j))
                        * xEij(i, j, a, seq(s, j));
            /* Compute distribution from potential
               (no max-shift here, unlike the other variants) */
            for (int a = 0; a < ali->nCodes; a++) P[a] = exp(H[a]);
            numeric_t Z = 0;
            for (int a = 0; a < ali->nCodes; a++) Z += P[a];
            numeric_t Zinv = 1.0 / Z;
            for (int a = 0; a < ali->nCodes; a++) P[a] *= Zinv;
            /* Log-likelihood contributions, scaled by sequence weight */
            fx -= ali->weights[s] * log(P[seq(s, i)]);
            /* Field gradient, masked */
            dHi(i, seq(s, i)) -= bitHi(i, seq(s, i)) * ali->weights[s];
            for (int a = 0; a < ali->nCodes; a++)
                dHi(i, a) -= -bitHi(i, a) * ali->weights[s] * P[a];
            /* Couplings gradient, masked */
            for (int j = 0; j < i; j++)
                dEij(i, j, seq(s, i), seq(s, j)) -=
                    bitEij(i, j, seq(s, i), seq(s, j)) * ali->weights[s];
            for (int j = i + 1; j < ali->nSites; j++)
                dEij(i, j, seq(s, i), seq(s, j)) -=
                    bitEij(i, j, seq(s, i), seq(s, j)) * ali->weights[s];
            for (int j = 0; j < i; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    dEij(i, j, a, seq(s, j)) -=
                        -bitEij(i, j, a, seq(s, j)) * ali->weights[s] * P[a];
            for (int j = i + 1; j < ali->nSites; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    dEij(i, j, a, seq(s, j)) -=
                        -bitEij(i, j, a, seq(s, j)) * ali->weights[s] * P[a];
        }
    }
    free(H);
    free(P);
    free(drop_mask);
    ali->negLogLk = fx;
    /* Gaussian priors: L2 penalties on fields and couplings */
    for (int i = 0; i < ali->nSites; i++)
        for (int ai = 0; ai < ali->nCodes; ai++) {
            dHi(i, ai) += lambdaHi(i) * 2.0 * xHi(i, ai);
            fx += lambdaHi(i) * xHi(i, ai) * xHi(i, ai);
        }
    for (int i = 0; i < ali->nSites-1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++) {
                    dEij(i, j, ai, aj) += lambdaEij(i, j)
                        * 2.0 * xEij(i, j, ai, aj);
                    fx += lambdaEij(i, j)
                        * xEij(i, j, ai, aj) * xEij(i, j, ai, aj);
                }
    fx = PostCondition(x, g, fx, ali, options);
    return fx;
}
static int ReportProgresslBFGS(void *instance, const lbfgsfloatval_t *x,
    const lbfgsfloatval_t *g, const lbfgsfloatval_t fx,
    const lbfgsfloatval_t xnorm, const lbfgsfloatval_t gnorm,
    const lbfgsfloatval_t step, int n, int k, int ls) {
    /* libLBFGS progress callback: prints one status line per iteration
       (header on the first) with elapsed time, convergence ratio, objective,
       raw negative log-likelihood, and the L2 norms of the field and
       coupling parameter blocks. Always returns 0 (continue optimizing).
       Side effect: renormalizes ali->start while computing elapsed time. */
    void **packed = (void **)instance;
    alignment_t *ali = (alignment_t *)packed[0];

    /* Parameter layout: the first nSites*nCodes entries are the fields (h),
       the remainder up to nParams are the couplings (e) */
    const int fieldCount = ali->nSites * ali->nCodes;
    lbfgsfloatval_t hSq = 0.0, eSq = 0.0, hGSq = 0.0, eGSq = 0.0;
    for (int i = 0; i < fieldCount; i++)
        hSq += x[i] * x[i];
    for (int i = 0; i < fieldCount; i++)
        hGSq += g[i] * g[i];
    for (int i = fieldCount; i < ali->nParams; i++)
        eSq += x[i] * x[i];
    for (int i = fieldCount; i < ali->nParams; i++)
        eGSq += g[i] * g[i];
    lbfgsfloatval_t hNorm = sqrt(hSq);
    lbfgsfloatval_t hGNorm = sqrt(hGSq);   /* computed but not printed */
    lbfgsfloatval_t eNorm = sqrt(eSq);
    lbfgsfloatval_t eGNorm = sqrt(eGSq);   /* computed but not printed */

    /* Elapsed time since ali->start, normalizing the usec field so the
       difference stays within one second (borrow, then carry) */
    static struct timeval now;
    gettimeofday(&now, NULL);
    if (now.tv_usec < ali->start.tv_usec) {
        int borrow = (ali->start.tv_usec - now.tv_usec) / 1000000 + 1;
        ali->start.tv_usec -= 1000000 * borrow;
        ali->start.tv_sec += borrow;
    }
    if (now.tv_usec - ali->start.tv_usec > 1000000) {
        int carry = (now.tv_usec - ali->start.tv_usec) / 1000000;
        ali->start.tv_usec += 1000000 * carry;
        ali->start.tv_sec -= carry;
    }
    numeric_t elapsed = (numeric_t) (now.tv_sec - ali->start.tv_sec)
        + ((numeric_t) (now.tv_usec - ali->start.tv_usec)) / 1E6;

    /* Header once, then one row per iteration */
    if (k == 1) fprintf(stderr,
        "iter\ttime\tcond\tfx\t-loglk"
        "\t||h||\t||e||\n");
    fprintf(stderr, "%d\t%.1f\t%.2f\t%.1f\t%.1f\t%.1f\t%.1f\n",
        k, elapsed, gnorm / xnorm, fx, ali->negLogLk, hNorm, eNorm);
    return 0;
}
void PreCondition(const lbfgsfloatval_t *x, lbfgsfloatval_t *g, alignment_t *ali, options_t *options) {
    /* Hook invoked before objective evaluation; intentionally a no-op.
       Kept as an extension point mirroring PostCondition. */
    /* Currently empty */
}
lbfgsfloatval_t PostCondition(const lbfgsfloatval_t *x, lbfgsfloatval_t *g, lbfgsfloatval_t fx, alignment_t *ali, options_t *options) {
    /* Post-process the objective and gradient after each evaluation:
       - under zeroAPC, zero out all field gradients (fields are frozen);
       - when lambdaGroup > 0, add group (L1/L2) regularization over each
         (i, j) coupling block: penalize the (smoothed) L2 norm of the block.
       Returns the adjusted objective value. */
    if (options->zeroAPC == 1) {
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                dHi(i, ai) = 0.0;
    }
    /* Group (L1/L2) regularization */
    if (options->lambdaGroup > 0) {
        for (int i = 0; i < ali->nSites - 1; i++) {
            for (int j = i + 1; j < ali->nSites; j++) {
                /* Squared norm of block (i, j), smoothed by a small epsilon
                   so the gradient below never divides by zero */
                double sumSquares = REGULARIZATION_GROUP_EPS;
                for (int ai = 0; ai < ali->nCodes; ai++)
                    for (int aj = 0; aj < ali->nCodes; aj++)
                        sumSquares += xEij(i, j, ai, aj) * xEij(i, j, ai, aj);
                double groupNorm = sqrt(sumSquares);
                fx += options->lambdaGroup * groupNorm;
                for (int ai = 0; ai < ali->nCodes; ai++)
                    for (int aj = 0; aj < ali->nCodes; aj++)
                        dEij(i, j, ai, aj) += options->lambdaGroup * xEij(i, j, ai, aj) / groupNorm;
            }
        }
    }
    return fx;
}
void ZeroAPCPriors(alignment_t *ali, options_t *options, numeric_t *lambdas,
    lbfgsfloatval_t *x) {
    /* Set the per-pair coupling prior strengths lambdaEij(i, j) from the
       empirical variances of the current coupling estimates, after removing
       a rank-one (APC-style) background component, and report summary
       statistics of the resulting hyperparameters to stderr.

       NOTE(review): xEij/lambdaEij are macros defined elsewhere, and
       LAMBDA_J_MIN/LAMBDA_J_MAX are project constants bounding the
       hyperparameters. */
    /* Compute the variances of the couplings for each pair */
    for (int i = 0; i < ali->nSites - 1; i++)
        for (int j = i + 1; j < ali->nSites; j++) {
            /* Mean(eij) over ai, aj */
            numeric_t mean = 0.0;
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++)
                    mean += xEij(i, j, ai, aj);
            mean *= 1.0 / ((numeric_t) ali->nCodes * ali->nCodes);
            /* Var(eij) over ai, aj */
            numeric_t ssq = 0.0;
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++)
                    ssq += (xEij(i, j, ai, aj) - mean)
                        * (xEij(i, j, ai, aj) - mean);
            /* Use N rather than N-1 since N has better MSE */
            numeric_t var = ssq / ((numeric_t) (ali->nCodes * ali->nCodes));
            /* lambdaEij temporarily stores the variance; converted to an
               inverse-variance hyperparameter below */
            lambdaEij(i, j) = var;
        }
    /* Determine the site-wise statistics of the variances */
    numeric_t nPairs = ((numeric_t) ((ali->nSites) * (ali->nSites - 1))) / 2.0;
    numeric_t V_avg = 0.0;
    numeric_t *V_pos_avg = (numeric_t *) malloc(ali->nSites * sizeof(numeric_t));
    for (int i = 0; i < ali->nSites; i++) {
        V_pos_avg[i] = 0.0;
    }
    for (int i = 0; i < ali->nSites - 1; i++) {
        for (int j = i + 1; j < ali->nSites; j++) {
            V_pos_avg[i] += lambdaEij(i, j) / (numeric_t) (ali->nSites - 1);
            V_pos_avg[j] += lambdaEij(i, j) / (numeric_t) (ali->nSites - 1);
            V_avg += lambdaEij(i, j) / nPairs;
        }
    }
    /* Remove the first component of the variances
       (APC: outer product of row/column averages over the grand mean) */
    for (int i = 0; i < ali->nSites - 1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            lambdaEij(i, j) =
                lambdaEij(i, j) - V_pos_avg[i] * V_pos_avg[j] / V_avg;
    /* Transform and truncate variances into lambda hyperparameters.
       NOTE(review): V_pos_avg is never freed here -- presumably a small,
       once-per-run leak; confirm whether cleanup is expected. */
    numeric_t pcount = 0.0;
    numeric_t psum = 0.0;
    numeric_t inbounds = 0;
    numeric_t min = LAMBDA_J_MAX;
    numeric_t max = LAMBDA_J_MIN;
    for (int i = 0; i < ali->nSites - 1; i++) {
        for (int j = i + 1; j < ali->nSites; j++) {
            /* Lambda coefficients are 1/2 the inverse variance */
            if (lambdaEij(i, j) > 0) {
                lambdaEij(i, j) = 1.0 / (2.0 * lambdaEij(i, j));
                psum += lambdaEij(i, j);
                pcount += 1.0;
            } else {
                /* Non-positive variance: force the clamp below to apply */
                lambdaEij(i, j) = LAMBDA_J_MAX + 1.0;
            }
            /* Truncate lambda for numerical stability */
            if (lambdaEij(i, j) >= LAMBDA_J_MIN && lambdaEij(i, j) <= LAMBDA_J_MAX)
                inbounds += 1.0 / (numeric_t) ((ali->nSites)*(ali->nSites - 1) / 2.0);
            if (lambdaEij(i, j) < 0 || !isfinite(lambdaEij(i, j)))
                lambdaEij(i, j) = LAMBDA_J_MAX;
            if (lambdaEij(i, j) < LAMBDA_J_MIN) lambdaEij(i, j) = LAMBDA_J_MIN;
            if (lambdaEij(i, j) > LAMBDA_J_MAX) lambdaEij(i, j) = LAMBDA_J_MAX;
            /* Track extremes */
            if (lambdaEij(i, j) > max) max = lambdaEij(i, j);
            if (lambdaEij(i, j) < min) min = lambdaEij(i, j);
        }
    }
    fprintf(stderr, "Raw coupling hyperparameter statistics:\n"
                    "\tMean positive lambda: %f\n"
                    "\tPercent of ij's positive: %f\n"
                    "\tPercent in bounds (%f < L < %f): %f\n",
                    psum / pcount,
                    pcount / nPairs,
                    min, max, inbounds);
}
const char *LBFGSErrorString(int ret) {
    /* Map a libLBFGS return code to a short human-readable name.
       ret: a status/error code returned by lbfgs(); 0 is success, and codes
       not recognized here report "No detected error".
       Returns a pointer to a static string literal (never NULL; do not free). */
    const char *p;
    switch(ret) {
        case LBFGSERR_UNKNOWNERROR:
            p = "UNKNOWNERROR";
            break;
        /** Logic error. */
        case LBFGSERR_LOGICERROR:
            p = "LOGICERROR";
            break;
        /** Insufficient memory. */
        case LBFGSERR_OUTOFMEMORY:
            p = "OUTOFMEMORY";
            break;
        /** The minimization process has been canceled. */
        case LBFGSERR_CANCELED:
            p = "CANCELED";
            break;
        /** Invalid number of variables specified. */
        case LBFGSERR_INVALID_N:
            p = "INVALID_N";
            break;
        /** Invalid number of variables (for SSE) specified. */
        case LBFGSERR_INVALID_N_SSE:
            p = "INVALID_N_SSE";
            break;
        /** The array x must be aligned to 16 (for SSE). */
        case LBFGSERR_INVALID_X_SSE:
            p = "INVALID_X_SSE";
            break;
        /** Invalid parameter lbfgs_parameter_t::epsilon specified. */
        case LBFGSERR_INVALID_EPSILON:
            p = "INVALID_EPSILON";
            break;
        /** Invalid parameter lbfgs_parameter_t::past specified. */
        case LBFGSERR_INVALID_TESTPERIOD:
            p = "INVALID_TESTPERIOD";
            break;
        /** Invalid parameter lbfgs_parameter_t::delta specified. */
        case LBFGSERR_INVALID_DELTA:
            p = "INVALID_DELTA";
            break;
        /** Invalid parameter lbfgs_parameter_t::linesearch specified. */
        case LBFGSERR_INVALID_LINESEARCH:
            p = "INVALID_LINESEARCH";
            break;
        /** Invalid parameter lbfgs_parameter_t::min_step specified. */
        case LBFGSERR_INVALID_MINSTEP:
            p = "INVALID_MINSTEP";
            break;
        /** Invalid parameter lbfgs_parameter_t::max_step specified. */
        case LBFGSERR_INVALID_MAXSTEP:
            p = "INVALID_MAXSTEP";
            break;
        /** Invalid parameter lbfgs_parameter_t::ftol specified. */
        case LBFGSERR_INVALID_FTOL:
            p = "INVALID_FTOL";
            break;
        /** Invalid parameter lbfgs_parameter_t::wolfe specified. */
        case LBFGSERR_INVALID_WOLFE:
            p = "INVALID_WOLFE";
            break;
        /** Invalid parameter lbfgs_parameter_t::gtol specified. */
        case LBFGSERR_INVALID_GTOL:
            p = "INVALID_GTOL";
            break;
        /** Invalid parameter lbfgs_parameter_t::xtol specified. */
        case LBFGSERR_INVALID_XTOL:
            p = "INVALID_XTOL";
            break;
        /** Invalid parameter lbfgs_parameter_t::max_linesearch specified. */
        case LBFGSERR_INVALID_MAXLINESEARCH:
            p = "INVALID_MAXLINESEARCH";
            break;
        /** Invalid parameter lbfgs_parameter_t::orthantwise_c specified. */
        case LBFGSERR_INVALID_ORTHANTWISE:
            p = "INVALID_ORTHANTWISE";
            break;
        /** Invalid parameter lbfgs_parameter_t::orthantwise_start specified. */
        case LBFGSERR_INVALID_ORTHANTWISE_START:
            p = "INVALID_ORTHANTWISE_START";
            break;
        /** Invalid parameter lbfgs_parameter_t::orthantwise_end specified. */
        case LBFGSERR_INVALID_ORTHANTWISE_END:
            /* BUG FIX: was "ORTHANTWISE_END", inconsistent with every other
               case, which echoes the LBFGSERR_ constant's suffix */
            p = "INVALID_ORTHANTWISE_END";
            break;
        /** The line-search step went out of the interval of uncertainty. */
        case LBFGSERR_OUTOFINTERVAL:
            p = "OUTOFINTERVAL";
            break;
        /** A logic error occurred; alternatively: the interval of uncertainty
            became too small. */
        case LBFGSERR_INCORRECT_TMINMAX:
            p = "INCORRECT_TMINMAX";
            break;
        /** A rounding error occurred; alternatively: no line-search step
            satisfies the sufficient decrease and curvature conditions. */
        case LBFGSERR_ROUNDING_ERROR:
            p = "ROUNDING_ERROR";
            break;
        /** The line-search step became smaller than lbfgs_parameter_t::min_step. */
        case LBFGSERR_MINIMUMSTEP:
            p = "MINIMUMSTEP";
            break;
        /** The line-search step became larger than lbfgs_parameter_t::max_step. */
        case LBFGSERR_MAXIMUMSTEP:
            /* BUG FIX: was the garbled literal "MAXILBFGSERR_MUMSTEP" */
            p = "MAXIMUMSTEP";
            break;
        /** The line-search routine reaches the maximum number of evaluations. */
        case LBFGSERR_MAXIMUMLINESEARCH:
            p = "MAXIMUMLINESEARCH";
            break;
        /** The algorithm routine reaches the maximum number of iterations. */
        case LBFGSERR_MAXIMUMITERATION:
            p = "MAXIMUMITERATION";
            break;
        /** Relative width of the interval of uncertainty is at most
            lbfgs_parameter_t::xtol. */
        case LBFGSERR_WIDTHTOOSMALL:
            p = "WIDTHTOOSMALL";
            break;
        /** A logic error (negative line-search step) occurred. */
        case LBFGSERR_INVALIDPARAMETERS:
            p = "INVALIDPARAMETERS";
            break;
        /** The current search direction increases the objective function value. */
        case LBFGSERR_INCREASEGRADIENT:
            p = "INCREASEGRADIENT";
            break;
        case 0:
            p = "Minimization success";
            break;
        default:
            p = "No detected error";
            break;
    }
    return p;
}
numeric_t ElapsedTime(struct timeval *start) {
/* Computes the elapsed time from START to NOW in seconds */
struct timeval now;
gettimeofday(&now, NULL);
if (now.tv_usec < start->tv_usec) {
int nsec = (start->tv_usec - now.tv_usec) / 1000000 + 1;
start->tv_usec -= 1000000 * nsec;
start->tv_sec += nsec;
}
if (now.tv_usec - start->tv_usec > 1000000) {
int nsec = (now.tv_usec - start->tv_usec) / 1000000;
start->tv_usec += 1000000 * nsec;
start->tv_sec -= nsec;
}
return (numeric_t) (now.tv_sec - start->tv_sec)
+ ((numeric_t) (now.tv_usec - start->tv_usec)) / 1E6;
} |
GB_binop__land_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_fp64)
// A.*B function (eWiseMult):  GB (_AemultB_01__land_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__land_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__land_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_fp64)
// A*D function (colscale): GB (_AxD__land_fp64)
// D*A function (rowscale): GB (_DxB__land_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__land_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__land_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_fp64)
// C=scalar+B GB (_bind1st__land_fp64)
// C=scalar+B' GB (_bind1st_tran__land_fp64)
// C=A+scalar GB (_bind2nd__land_fp64)
// C=A'+scalar GB (_bind2nd_tran__land_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = ((aij != 0) && (bij != 0))
// Type and operator plumbing consumed by the included template files below.
// C, A, and B all have type double for the LAND_FP64 operator.
#define GB_ATYPE \
    double
#define GB_BTYPE \
    double
#define GB_CTYPE \
    double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator: logical AND of the "is nonzero" tests of x and y
#define GB_BINOP(z, x, y, i, j) \
    z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_FP64 || GxB_NO_LAND_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled: this kernel is only generated for arithmetic ops.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B entrywise with the LAND_FP64 op, all three matrices dense,
// no accumulator; the loop body lives in the included template.
GrB_Info GB (_Cdense_ewise3_noaccum__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // If this operator was compiled out, report GrB_NO_VALUE so the caller
    // falls back to the generic method.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B with the LAND_FP64 op: accumulate a sparse matrix B into a dense C,
// using the precomputed slicing of B's entries across tasks/threads.
GrB_Info GB (_Cdense_accumB__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b with the LAND_FP64 op: accumulate a scalar b into a dense matrix C.
GrB_Info GB (_Cdense_accumb__land_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above already returned (generated redundancy)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D with the
// LAND_FP64 op; A's entries are pre-sliced across tasks/threads.
GrB_Info GB (_AxD__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Auto-generated kernel: row-scales B by the diagonal of D using LAND (fp64).
GrB_Info GB (_DxB__land_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is the output value array consumed by the template below.
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Auto-generated eWiseAdd kernel (set-union of patterns) with LAND (fp64).
// The C_to_* arrays map C's vectors back to vectors of M/A/B; TaskList is the
// precomputed parallel schedule.
GrB_Info GB (_AaddB__land_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declared here so GB_FREE_WORK can release it after the template
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Auto-generated eWiseMult kernel (set-intersection of patterns), LAND (fp64).
GrB_Info GB (_AemultB_01__land_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Auto-generated eWiseMult kernel for mixed sparsity, LAND (fp64).  GB_BINOP_FLIP
// is set by the generator only for non-commutative ops without a flipped twin;
// LAND is commutative, so the #else branch is the one actually compiled here.
GrB_Info GB (_AemultB_02__land_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Auto-generated eWiseMult kernel driven by the sparse mask M, LAND (fp64).
GrB_Info GB (_AemultB_03__land_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Auto-generated eWiseMult kernel producing a bitmap C, LAND (fp64).
GrB_Info GB (_AemultB_bitmap__land_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Auto-generated: Cx [p] = (x && Bx [p]) for each entry p present in B.
GrB_Info GB (_bind1st__land_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// GBB tests the bitmap; entries absent from B are skipped
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
// logical AND of the bound scalar x with bij, stored as 0.0/1.0
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Auto-generated: Cx [p] = (Ax [p] && y) for each entry p present in A.
GrB_Info GB (_bind2nd__land_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// GBB tests the bitmap; entries absent from A are skipped
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
// logical AND of aij with the bound scalar y, stored as 0.0/1.0
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}

// Auto-generated: transpose A and apply z = LAND (x, aij) with x bound first.
GrB_Info GB (_bind1st_tran__land_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-define GB_ATYPE for any kernel compiled after this one
// (generator artifact: it redefines to the same type here)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}

// Auto-generated: transpose A and apply z = LAND (aij, y) with y bound second.
GrB_Info GB (_bind2nd_tran__land_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matrix.c |
#include "matrix.h"
/* Value/index pair used by sort1d: 'a' points at an element of the array
   being sorted and 'b' records that element's original index. */
typedef struct {
    double* a;
    int b;
} vipair;
double macheps() {
double macheps;
macheps = 1.0;
while ((macheps + 1.0) > 1.0) {
macheps = macheps / 2.0;
}
macheps = macheps * 2;
return macheps;
}
/* Larger of two doubles. */
double pmax(double a, double b) {
    return (a > b) ? a : b;
}
/* Smaller of two doubles. */
double pmin(double a, double b) {
    return (a < b) ? a : b;
}
/* Larger of two ints. */
int imax(int a, int b) {
    return (a > b) ? a : b;
}
/* Smaller of two ints. */
int imin(int a, int b) {
    return (a < b) ? a : b;
}
/* Sign of x as +/-1.0; zero is treated as positive. */
double signx(double x) {
    return (x >= 0.0) ? 1.0 : -1.0;
}
/* Euclidean (L2) norm of vec[0..N-1]. */
double l2norm(double *vec, int N) {
    double acc = 0.0;
    int k;
    for (k = 0; k < N; ++k) {
        acc += vec[k] * vec[k];
    }
    return sqrt(acc);
}
/* qsort comparator for vipair: orders by the pointed-to double in
   DESCENDING order (largest value first).  Equal values compare as 0,
   so their relative order is unspecified (qsort is not stable). */
int compare (const void* ind1, const void* ind2)
{
    if (*((vipair *)ind1)->a > *((vipair *)ind2)->a)
        return -1;
    else if (*((vipair *)ind1)->a < *((vipair *)ind2)->a)
        return 1;
    else
        return 0;
}
/* Write into pos[0..N-1] the indices of v sorted by descending value
   (v itself is not modified).  No-op for N <= 0. */
void sort1d(double* v, int N, int* pos)
{
    vipair* pairs;
    int k;
    if (N <= 0) {
        return;
    }
    pairs = (vipair*) malloc(sizeof(vipair) * N);
    for (k = 0; k < N; ++k) {
        pairs[k].a = &v[k];
        pairs[k].b = k;
    }
    qsort(pairs, N, sizeof(vipair), compare);
    for (k = 0; k < N; ++k) {
        pos[k] = pairs[k].b;
    }
    free(pairs);
}
/* Largest absolute value in array[0..N-1]; 0.0 for an empty array. */
double array_max_abs(double *array, int N) {
    double best = 0.0;
    int k;
    for (k = 0; k < N; ++k) {
        double mag = fabs(array[k]);
        if (mag > best) {
            best = mag;
        }
    }
    return best;
}
/* Largest element of array[0..N-1].  Requires N >= 1 (array[0] is read
   unconditionally, as in the original). */
double array_max(double *array, int N) {
    double best = array[0];
    int k;
    for (k = 1; k < N; ++k) {
        if (array[k] > best) {
            best = array[k];
        }
    }
    return best;
}
/* Smallest element of array[0..N-1].  Requires N >= 1 (array[0] is read
   unconditionally, as in the original). */
double array_min(double *array, int N) {
    double best = array[0];
    int k;
    for (k = 1; k < N; ++k) {
        if (array[k] < best) {
            best = array[k];
        }
    }
    return best;
}
void dtranspose(double *sig, int rows, int cols,double *col) {
int max,ud,i,k;
if (rows >= cols) {
max = cols;
} else {
max = rows;
}
ud = 0;
for (i= -rows + 1; i < cols; i++) {
if (i <= 0) {
ud++;
if (ud >= max)
ud = max;
for (k = 0; k < ud; k++) {
col[k*rows+k-i] = sig[(k-i)*cols+k];
}
} else {
if (i - cols + rows > 0) {
ud--;
if (ud >= max)
ud = max;
}
for (k = 0; k < ud; k++) {
col[(k+i)*rows+k] = sig[k*cols+k+i];
}
}
}
}
/* Simple transpose: col (cols x rows) = transpose of sig (rows x cols),
   both row-major.  Rows are processed independently (parallel-friendly). */
void stranspose(double *sig, int rows, int cols, double *col) {
    int r;
    #pragma omp parallel for
    for (r = 0; r < rows; r++) {
        int base = r * cols;
        int c;
        for (c = 0; c < cols; c++) {
            col[c * rows + r] = sig[base + c];
        }
    }
}
/* Cache-oblivious recursive transpose: n = transpose of m for the current
   rows x cols tile.  r is the row stride of the full output n, c the row
   stride of the full input m; they stay fixed across the recursion while
   the tile is split along its longer dimension until it fits BLOCKSIZE. */
void rtranspose(double *m, int rows, int cols, double *n, int r, int c) {
    register int i,j;
    int rm,cm;
    int rm1,cm1,rm2,cm2;
    int block;
    block = (int) BLOCKSIZE;
    if (rows <= block && cols <= block) {
        /* base case: direct element-wise transpose of the tile */
        for (i = 0; i < rows; ++i) {
            for (j = 0; j < cols; ++j) {
                n[i+j*r] = m[j+i*c];
                //cout << *(n+i+j*r) << " ";
            }
        }
        //cout << endl;
    } else if (cols >= rows) {
        /* split the tile left/right */
        rm = rows;
        cm1 = (int) ceil((double) cols/2.0);
        cm2 = cols - cm1;
        rtranspose(m,rm,cm1,n,r,c);
        rtranspose(m+cm1,rm,cm2,n+cm1*r,r,c);
    } else if (rows > cols) {
        /* split the tile top/bottom */
        rm1 = (int) ceil((double) rows/2.0);
        rm2 = rows - rm1;
        cm = cols;
        rtranspose(m,rm1,cm,n,r,c);
        rtranspose(m+rm1*c,rm2,cm,n+rm1,r,c);
    }
}
/* Transpose dispatcher: use the recursive blocked transpose when either
   dimension reaches BLOCKSIZE, else the simple loop transpose. */
void ctranspose(double *sig, int rows, int cols, double *col) {
    int blk = (int) BLOCKSIZE;
    if (rows >= blk || cols >= blk) {
        rtranspose(sig, rows, cols, col, rows, cols);
    } else {
        stranspose(sig, rows, cols, col);
    }
}
/* Top-level transpose: only very large matrices (both dims >= 16*BLOCKSIZE)
   take the cache-oblivious path; everything else uses the simple loop. */
void mtranspose(double *sig, int rows, int cols, double *col) {
    int threshold = (int) BLOCKSIZE * 16;
    if (rows >= threshold && cols >= threshold) {
        ctranspose(sig, rows, cols, col);
    } else {
        stranspose(sig, rows, cols, col);
    }
}
/* In-place transpose of row-major A (M x N); on return A holds the N x M
   transpose in the same buffer.  The square case swaps across the diagonal;
   the rectangular cases stash the extra rows/columns in a temporary buffer,
   transpose the leading square in place, shift rows to the new stride with
   memmove, then merge the buffered part back.
   NOTE(review): malloc results are not checked; the memmove order (downward
   for M>N, upward for M<N) is what keeps unread data from being clobbered —
   kept byte-identical. */
void itranspose(double *A, int M, int N) {
    int i, j, p, iter;
    double *buf;
    double temp;
    if (M == N) {
        // M == N
        for (i = 0; i < N; ++i) {
            for (j = i + 1; j < N; ++j) {
                temp = A[i + j*N];
                A[i + j*N] = A[j + i*N];
                A[j + i*N] = temp;
            }
        }
    } else if (M > N) {
        /* stash the trailing (M-N) rows, transpose the leading N x N block */
        p = M - N;
        buf = (double*)malloc(sizeof(double)* p * N);
        memcpy(buf, A + N * N, sizeof(*A)*p*N);
        for (i = 0; i < N; ++i) {
            for (j = i + 1; j < N; ++j) {
                temp = A[i + j*N];
                A[i + j*N] = A[j + i*N];
                A[j + i*N] = temp;
            }
        }
        /* spread rows out from stride N to the new stride M (high to low) */
        for (i = N - 1; i >= 1; --i) {
            memmove(A + i*M, A + i*N, sizeof(*A)*M);
        }
        /* append the stashed rows, transposed, as the trailing columns */
        for (i = 0; i < N; ++i) {
            iter = N + i * M;
            for (j = 0; j < p; ++j) {
                A[iter + j] = buf[j*N + i];
            }
        }
        free(buf);
    }
    else if (M < N) {
        /* stash the trailing (N-M) columns, transposed */
        p = N - M;
        buf = (double*)malloc(sizeof(double)* p * M);
        for (i = 0; i < M; ++i) {
            iter = M + i*N;
            for (j = 0; j < p; ++j) {
                buf[j*M + i] = A[iter + j];
            }
        }
        /* pack rows down from stride N to the new stride M (low to high) */
        for (i = 1; i < M; ++i) {
            memmove(A + i*M, A + i * N, sizeof(*A)*M);
        }
        for (i = 0; i < M; ++i) {
            for (j = i + 1; j < M; ++j) {
                temp = A[i + j*M];
                A[i + j*M] = A[j + i*M];
                A[j + i*M] = temp;
            }
        }
        /* the stashed columns become the trailing rows of the transpose */
        memcpy(A + M*M, buf, sizeof(*A)*p*M);
        free(buf);
    }
}
/* Pretty-print A (row x col, row-major) to stdout, one matrix row per line. */
void mdisplay(double *A, int row, int col) {
    int r, c;
    printf("\n MATRIX Order : %d X %d \n \n", row, col);
    for (r = 0; r < row; r++) {
        printf("R%d: ", r);
        for (c = 0; c < col; c++) {
            printf("%g ", A[r*col + c]);
        }
        printf(":R%d \n", r);
    }
}
/* Elementwise C = A + B; all three matrices are rows x cols. */
void madd(double* A, double* B, double* C, int rows, int cols) {
    int total = rows * cols;
    int k;
    #pragma omp parallel for
    for (k = 0; k < total; ++k) {
        C[k] = A[k] + B[k];
    }
}
/* Elementwise C = A - B; all three matrices are rows x cols. */
void msub(double* A, double* B, double* C, int rows, int cols) {
    int total = rows * cols;
    int k;
    #pragma omp parallel for
    for (k = 0; k < total; ++k) {
        C[k] = A[k] - B[k];
    }
}
/* In place: A *= alpha (A is rows x cols and is overwritten). */
void scale(double *A, int rows, int cols, double alpha) {
    int total = rows * cols;
    int k;
    #pragma omp parallel for
    for (k = 0; k < total; ++k) {
        A[k] *= alpha;
    }
}
/* Naive triple-loop multiply: C (ra x cb) = A (ra x ca) * B (ca x cb),
   all row-major.  Output rows are independent (parallel-friendly). */
void nmult(double* A, double* B, double* C, int ra, int ca, int cb) {
    int r, c, k;
    #pragma omp parallel for private(r, c, k)
    for (r = 0; r < ra; ++r) {
        for (c = 0; c < cb; ++c) {
            double acc = 0.0;
            for (k = 0; k < ca; ++k) {
                acc += A[r*ca + k] * B[k*cb + c];
            }
            C[r*cb + c] = acc;
        }
    }
}
/* C (ra x cb) = A (ra x ca) * B (ca x cb), cache-friendlier variant:
   B is transposed up front so the inner dot product walks both operands
   contiguously. */
void tmult(double* A, double* B, double* C, int ra, int ca, int cb) {
    int r, c, k;
    double *Btrans = (double*) malloc(sizeof(double) * ca * cb);
    mtranspose(B, ca, cb, Btrans);   /* Btrans is cb x ca */
    #pragma omp parallel for private(r, c, k)
    for (r = 0; r < ra; ++r) {
        for (c = 0; c < cb; ++c) {
            double acc = 0.0;
            for (k = 0; k < ca; ++k) {
                acc += A[r*ca + k] * Btrans[c*ca + k];
            }
            C[r*cb + c] = acc;
        }
    }
    free(Btrans);
}
/* Cache-oblivious recursive multiply: C (m x p) += A (m x n) * B (n x p),
   splitting the largest dimension until m+n+p <= CUTOFF.  C must be zeroed
   by the caller (see rmult).
   NOTE(review): the stride parameters are misnamed relative to srecmult —
   sA is never used here; sB is used as A's row stride and sC as the row
   stride of BOTH B and C.  This is only correct when B and C share a stride,
   which holds for rmult's call (strB=n, strC=p, B is n x p, C is m x p).
   Confirm before reusing this function elsewhere.  Kept byte-identical. */
void recmult(double* A, double* B, double* C, int m, int n, int p, int sA, int sB, int sC) {
    int m2,n2,p2;
    register int i,j,k;
    int u,v,t;
    if (m + n + p <= CUTOFF) {
        /* base case: accumulate directly into C */
        //#pragma omp parallel for private(i,j,k,v,u,t)
        for (i = 0; i < m; ++i) {
            for (j = 0; j < p; ++j) {
                v = i * sB;      /* row i of A (stride sB — see note above) */
                u = i * sC;      /* row i of C */
                t = j + u;
                for (k = 0; k < n;++k) {
                    C[t] += A[k + v] * B[j + k * sC];
                }
            }
        }
    } else if (m >= n && m >= p) {
        /* split A and C by rows */
        m2 = (int) ceil((double) m / 2.0);
        recmult(A,B,C,m2,n,p,sA,sB,sC);
        recmult(A + m2*sB,B,C + m2*sC,m-m2,n,p,sA,sB,sC);
    } else if (n >= m && n >= p) {
        /* split the inner dimension: two accumulating passes over C */
        n2 = (int) ceil((double) n / 2.0);
        recmult(A,B,C,m,n2,p,sA,sB,sC);
        recmult(A+n2,B+n2*sC,C,m,n-n2,p,sA,sB,sC);
    } else if (p >= m && p >= n) {
        /* split B and C by columns */
        p2 = (int) ceil((double) p / 2.0);
        recmult(A,B,C,m,n,p2,sA,sB,sC);
        recmult(A,B+p2,C+p2,m,n,p-p2,sA,sB,sC);
    }
}
/* C (m x p) = A (m x n) * B (n x p) via the cache-oblivious recursion.
   C is zeroed first because recmult accumulates into it. */
void rmult(double* A, double* B, double* C, int m, int n, int p) {
    int total = m * p;
    int k;
    for (k = 0; k < total; ++k) {
        C[k] = 0.0;
    }
    recmult(A, B, C, m, n, p, m, n, p);
}
/* Compute how many halvings bring (a+b+c) under CUTOFF, and round each
   dimension up so that every halving step along the way is exact:
   each dim is repeatedly ceil-halved, then scaled back by 2^rec.
   Returns the recursion depth.  Note '(int) da * mul' truncates da to int
   first, then multiplies by the double 'mul' (an exact power of two). */
int findrec(int *a, int *b, int *c) {
    int rec;
    double da,db,dc,mul;
    da = (double) *a;
    db = (double) *b;
    dc = (double) *c;
    rec = 0;
    mul = 1.;
    while (da + db + dc > (double) CUTOFF) {
        rec++;
        mul *= 2;
        da = ceil(da/2.);
        db = ceil(db/2.);
        dc = ceil(dc/2.);
    }
    *a = (int) da * mul;
    *b = (int) db * mul;
    *c = (int) dc * mul;
    return rec;
}
/* Y ((rows+zrow) x (cols+zcol)) = X (rows x cols) zero-padded on the
   right and bottom. */
void add_zero_pad(double *X, int rows, int cols, int zrow, int zcol, double *Y) {
    int R = rows + zrow;
    int C = cols + zcol;
    int i, j;
    for (i = 0; i < R; ++i) {
        for (j = 0; j < C; ++j) {
            Y[i*C + j] = (i < rows && j < cols) ? X[i*cols + j] : 0.0;
        }
    }
}
/* Z ((rows-zrow) x (cols-zcol)) = top-left block of Y (rows x cols):
   inverse of add_zero_pad. */
void remove_zero_pad(double *Y, int rows, int cols, int zrow, int zcol, double *Z) {
    int R = rows - zrow;
    int C = cols - zcol;
    int i, j;
    for (i = 0; i < R; ++i) {
        for (j = 0; j < C; ++j) {
            Z[i*C + j] = Y[i*cols + j];
        }
    }
}
/* C = A + B on rows x cols submatrices; sA, sB, sC are the row strides
   of the (possibly larger) parent matrices. */
void madd_stride(double* A, double* B, double* C, int rows, int cols, int sA, int sB, int sC) {
    int i, j;
    for (i = 0; i < rows; ++i) {
        double *ra = A + i * sA;
        double *rb = B + i * sB;
        double *rc = C + i * sC;
        for (j = 0; j < cols; ++j) {
            rc[j] = ra[j] + rb[j];
        }
    }
}
/* C = A - B on rows x cols submatrices; sA, sB, sC are the row strides
   of the (possibly larger) parent matrices. */
void msub_stride(double* A, double* B, double* C, int rows, int cols, int sA, int sB, int sC) {
    int i, j;
    for (i = 0; i < rows; ++i) {
        double *ra = A + i * sA;
        double *rb = B + i * sB;
        double *rc = C + i * sC;
        for (j = 0; j < cols; ++j) {
            rc[j] = ra[j] - rb[j];
        }
    }
}
/* Recursive blocked C = A + B over quadrants; p only feeds the CUTOFF test.
   NOTE(review): like recmult, the stride naming is shifted — sA is unused,
   the quadrant offsets use sB for A and sC for both B and C, and the halving
   assumes even dims.  This function appears unused in this file; confirm the
   intended stride convention before calling it.  Kept byte-identical. */
void rmadd_stride(double* A, double* B, double* C, int rows, int cols, int p, int sA, int sB, int sC) {
    int i,j,u,v,w;
    if (rows + cols + p <= CUTOFF) {
        /* base case: plain strided add */
        for (i = 0; i < rows; ++i) {
            u = i * sC;
            v = i * sA;
            w = i * sB;
            for(j = 0; j < cols;j++) {
                C[j + u] = A[j + v] + B[j + w];
            }
        }
    } else {
        /* recurse on the four quadrants */
        rows/=2;cols/=2;p/=2;
        rmadd_stride(A,B,C,rows,cols,p,sA,sB,sC);
        rmadd_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC);
        rmadd_stride(A + rows *sB,B + rows *sC,C + rows *sC,rows,cols,p,sA,sB,sC);
        rmadd_stride(A + rows *sB + cols,B + rows *sC + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC);
    }
}
/* Recursive blocked C = A - B over quadrants; p only feeds the CUTOFF test.
   NOTE(review): same shifted stride convention as rmadd_stride (sA unused in
   the recursion, sB used for A, sC for B and C); appears unused in this file.
   Kept byte-identical. */
void rmsub_stride(double* A, double* B, double* C, int rows, int cols, int p, int sA, int sB, int sC) {
    int i,j,u,v,w;
    if (rows + cols + p <= CUTOFF) {
        /* base case: plain strided subtract */
        for (i = 0; i < rows; ++i) {
            u = i * sC;
            v = i * sA;
            w = i * sB;
            for(j = 0; j < cols;j++) {
                C[j + u] = A[j + v] - B[j + w];
            }
        }
    } else {
        /* recurse on the four quadrants */
        rows/=2;cols/=2;p/=2;
        rmsub_stride(A,B,C,rows,cols,p,sA,sB,sC);
        rmsub_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC);
        rmsub_stride(A + rows *sB,B + rows *sC,C + rows *sC,rows,cols,p,sA,sB,sC);
        rmsub_stride(A + rows *sB + cols,B + rows *sC + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC);
    }
}
/* Strassen recursive multiply: C (m x p) = A (m x n) * B (n x p), with row
   strides sA, sB, sC.  Below CUTOFF it falls back to the naive triple loop;
   otherwise it halves all dims (assumed even — smult pads via findrec),
   forms the seven Strassen products m1..m7 and combines them into the four
   quadrants of C.  m2, m5, m6, m7 are computed directly into C's quadrants
   to save allocations; m1, m3, m4 use temporaries. */
void srecmult(double* A, double* B, double* C, int m, int n, int p, int sA, int sB, int sC) {
    register int i,j,k;
    int u,v,t;
    double sum;
    double *A1,*B1;
    double *a11,*a12,*a21,*a22;
    double *b11,*b12,*b21,*b22;
    double *c11,*c12,*c21,*c22;
    double *m1,*m2,*m3,*m4,*m5,*m6,*m7;
    int sm1,sm2,sm3,sm4,sm5,sm6,sm7;
    int sA1,sB1;
    if (m + n + p <= CUTOFF) {
        /* base case: naive multiply with proper strides */
        for (i = 0; i < m; ++i) {
            for (j = 0; j < p; ++j) {
                v = i * sA;
                u = i * sC;
                t = j + u;
                sum = 0.;
                for (k = 0; k < n;++k) {
                    sum += A[k + v] * B[j + k * sB];
                }
                C[t] = sum;
            }
        }
    } else {
        m/=2;n/=2;p/=2;
        // A size mXn, C size mXp
        a11 = A;
        a12 = A + n;
        a21 = A + m * sA;
        a22 = A + n + m * sA;
        //B size nXp
        b11 = B;
        b12 = B + p;
        b21 = B + n * sB;
        b22 = B + p + n * sB;
        //C size mXp
        c11 = C;
        c12 = C + p;
        c21 = C + m * sC;
        c22 = C + p + m * sC;
        // m matrices have dimension m X p each. See http://en.wikipedia.org/wiki/Strassen_algorithm
        m1 = (double*) malloc(sizeof(double) *m * p);
        sm1 = p;
        m3 = (double*) malloc(sizeof(double) *m * p);
        sm3 = p;
        m4 = (double*) malloc(sizeof(double) *m * p);
        sm4 = p;
        /* m2, m5, m6, m7 alias C's quadrants (stride sC) */
        m2 = c21;
        sm2 = sC;
        m5 = c12;
        sm5 = sC;
        m6 = c22;
        sm6 = sC;
        m7 = c11;
        sm7 = sC;
        //m1 = (a11+a22)(b11+b22)
        sA1 = n;
        sB1 = p;
        A1 = (double*) malloc(sizeof(double) * m * n);
        B1 = (double*) malloc(sizeof(double) * n * p);
        madd_stride(a11,a22,A1,m,n,sA,sA,sA1);
        madd_stride(b11,b22,B1,n,p,sB,sB,sB1);
        srecmult(A1,B1,m1,m,n,p,sA1,sB1,sm1);
        free(A1);
        free(B1);
        //m2 = (a21+a22) b11
        A1 = (double*) malloc(sizeof(double) * m * n);
        madd_stride(a21,a22,A1,m,n,sA,sA,sA1);
        srecmult(A1,b11,m2,m,n,p,sA1,sB,sm2);
        free(A1);
        //m3 = a11 (b12-b22)
        B1 = (double*) malloc(sizeof(double) * n * p);
        //rmsub_stride(B + p,B + p + n * sC,B1,n,p,m,sC,sC,sC/2);
        msub_stride(b12,b22,B1,n,p,sB,sB,sB1);
        srecmult(a11,B1,m3,m,n,p,sA,sB1,sm3);
        free(B1);
        //m4 = a22 (b21-b11)
        B1 = (double*) malloc(sizeof(double) * n * p);
        //rmsub_stride(B + p,B + p + n * sC,B1,n,p,m,sC,sC,sC/2);
        msub_stride(b21,b11,B1,n,p,sB,sB,sB1);
        srecmult(a22,B1,m4,m,n,p,sA,sB1,sm4);
        free(B1);
        //m5 = (a11+a12) b22
        A1 = (double*) malloc(sizeof(double) * m * n);
        madd_stride(a11,a12,A1,m,n,sA,sA,sA1);
        srecmult(A1,b22,m5,m,n,p,sA1,sB,sm5);
        free(A1);
        //m6 = (a21-a11)(b11+b12)
        A1 = (double*) malloc(sizeof(double) * m * n);
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(a21,a11,A1,m,n,sA,sA,sA1);
        madd_stride(b11,b12,B1,n,p,sB,sB,sB1);
        srecmult(A1,B1,m6,m,n,p,sA1,sB1,sm6);
        free(A1);
        free(B1);
        //m7 = (a12-a22)(b21+b22)
        A1 = (double*) malloc(sizeof(double) * m * n);
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(a12,a22,A1,m,n,sA,sA,sA1);
        madd_stride(b21,b22,B1,n,p,sB,sB,sB1);
        srecmult(A1,B1,m7,m,n,p,sA1,sB1,sm7);
        free(A1);
        free(B1);
        // c11 = m1 + m4 - m5 + m7 (accumulated in place into m7/c11)
        A1 = (double*) malloc(sizeof(double) * m * p);
        sA1 = p;
        madd_stride(m1,m7,m7,m,p,sm1,sm7,sm7);
        msub_stride(m4,m5,A1,m,p,sm4,sm5,sA1);
        madd_stride(m7,A1,m7,m,p,sm7,sA1,sm7);
        free(A1);
        // c22 = m1 - m2 + m3 + m6 (accumulated in place into m6/c22)
        A1 = (double*) malloc(sizeof(double) * m * p);
        sA1 = p;
        madd_stride(m1,m6,m6,m,p,sm1,sm6,sm6);
        msub_stride(m3,m2,A1,m,p,sm3,sm2,sA1);
        madd_stride(m6,A1,m6,m,p,sm6,sA1,sm6);
        free(A1);
        //c12 = m3 + m5
        madd_stride(m3,m5,m5,m,p,sm3,sm5,sm5);
        //c21 = m2 + m4
        madd_stride(m4,m2,m2,m,p,sm4,sm2,sm2);
        free(m1);
        free(m3);
        free(m4);
    }
}
/* C (m x p) = A (m x n) * B (n x p) via Strassen: pad the operands so every
   recursive halving is exact (findrec), multiply, then strip the padding.
   Fix: removed the unused P buffer of (a/2)*(c/2) doubles that was allocated
   and freed without ever being touched, and the ignored 'nrec' result. */
void smult(double* A, double* B, double* C, int m, int n, int p) {
    int a = m, b = n, c = p;
    double *X, *Y, *Z;
    findrec(&a, &b, &c);   /* round dims up for the Strassen recursion */
    X = (double*) malloc(sizeof(double) * a * b);
    Y = (double*) malloc(sizeof(double) * b * c);
    Z = (double*) malloc(sizeof(double) * a * c);
    add_zero_pad(A, m, n, a - m, b - n, X);
    add_zero_pad(B, n, p, b - n, c - p, Y);
    srecmult(X, Y, Z, a, b, c, b, c, c);
    remove_zero_pad(Z, a, c, a - m, c - p, C);
    free(X);
    free(Y);
    free(Z);
}
/* General multiply dispatcher: naive multiply for small problems,
   Strassen (smult) otherwise. */
void mmult(double* A, double* B, double* C, int m, int n, int p) {
    if (m + n + p <= CUTOFF / 2) {
        nmult(A, B, C, m, n, p);
    } else {
        smult(A, B, C, m, n, p);
    }
}
/* In-place LU factorization with partial pivoting of the N x N row-major
   matrix A.  On return A holds L (unit diagonal, strictly below) and U
   (on/above the diagonal); ipiv[i] is the original row now in position i.
   Returns 0.
   Fix: the pivot search stored the SIGNED candidate ('mval = A[j*N+k]')
   instead of its magnitude, so after meeting a large negative candidate
   every subsequent row compared greater and the wrong pivot row was chosen.
   The magnitude is now kept, matching the comparison. */
static int pludecomp(double *A, int N, int *ipiv) {
    int k, j, l, c1, c2, mind, tempi;
    double ld, mult, mval, temp;
    for (k = 0; k < N; ++k)
        ipiv[k] = k;
    for (k = 0; k < N - 1; ++k) {
        /* find the largest-magnitude entry in column k, rows k..N-1 */
        mval = fabs(A[k*N + k]);
        mind = k;
        for (j = k + 1; j < N; ++j) {
            if (mval < fabs(A[j*N + k])) {
                mval = fabs(A[j*N + k]);   /* FIX: was the signed value */
                mind = j;
            }
        }
        if (mind != k) {
            /* swap rows k and mind, and record the swap in ipiv */
            c1 = k * N;
            c2 = mind * N;
            tempi = ipiv[mind];
            ipiv[mind] = ipiv[k];
            ipiv[k] = tempi;
            for (j = 0; j < N; j++) {
                temp = A[c1 + j];
                A[c1 + j] = A[c2 + j];
                A[c2 + j] = temp;
            }
        }
        c2 = k * N;
        ld = A[c2 + k];
        if (ld != 0.) {
            /* eliminate column k below the pivot; multipliers become L */
            for (j = k + 1; j < N; ++j) {
                c1 = j * N;
                mult = A[c1 + k] /= ld;
                for (l = k + 1; l < N; ++l) {
                    A[c1 + l] -= mult * A[c2 + l];
                }
            }
        }
    }
    return 0;
}
void ludecomp(double *A,int N,int *ipiv) {
pludecomp(A,N,ipiv);
}
/* In-place LU with partial pivoting for a rectangular M x N row-major A.
   M > N: pivots over all M rows for each of the N columns.
   M < N: pivots over the M rows for the first M-1 columns.
   M == N: delegates to pludecomp.
   ipiv[i] is the original row now in position i.  Returns 0.
   Fixes: (1) the pivot search stored the signed candidate instead of its
   magnitude (same bug as pludecomp — a large negative candidate made every
   later row win the comparison); (2) ipiv was initialized with a spurious
   '(double) k' cast into an int. */
int rludecomp(double *A, int M, int N, int *ipiv) {
    int k, j, l, c1, c2, mind, tempi;
    double ld, mult, mval, temp;
    for (k = 0; k < M; ++k)
        ipiv[k] = k;           /* FIX: was ipiv[k] = (double) k */
    if (M > N) {
        for (k = 0; k < N; ++k) {
            mval = fabs(A[k*N + k]);
            mind = k;
            for (j = k + 1; j < M; ++j) {
                if (mval < fabs(A[j*N + k])) {
                    mval = fabs(A[j*N + k]);   /* FIX: was the signed value */
                    mind = j;
                }
            }
            if (mind != k) {
                /* swap rows k and mind, record in ipiv */
                c1 = k * N;
                c2 = mind * N;
                tempi = ipiv[mind];
                ipiv[mind] = ipiv[k];
                ipiv[k] = tempi;
                for (j = 0; j < N; j++) {
                    temp = A[c1 + j];
                    A[c1 + j] = A[c2 + j];
                    A[c2 + j] = temp;
                }
            }
            c2 = k * N;
            ld = A[c2 + k];
            if (ld != 0.) {
                /* eliminate column k below the pivot */
                for (j = k + 1; j < M; ++j) {
                    c1 = j * N;
                    mult = A[c1 + k] /= ld;
                    for (l = k + 1; l < N; ++l) {
                        A[c1 + l] -= mult * A[c2 + l];
                    }
                }
            }
        }
    }
    else if (M < N) {
        for (k = 0; k < M - 1; ++k) {
            mval = fabs(A[k*N + k]);
            mind = k;
            for (j = k + 1; j < M; ++j) {
                if (mval < fabs(A[j*N + k])) {
                    mval = fabs(A[j*N + k]);   /* FIX: was the signed value */
                    mind = j;
                }
            }
            if (mind != k) {
                /* swap rows k and mind, record in ipiv */
                c1 = k * N;
                c2 = mind * N;
                tempi = ipiv[mind];
                ipiv[mind] = ipiv[k];
                ipiv[k] = tempi;
                for (j = 0; j < N; j++) {
                    temp = A[c1 + j];
                    A[c1 + j] = A[c2 + j];
                    A[c2 + j] = temp;
                }
            }
            c2 = k * N;
            ld = A[c2 + k];
            if (ld != 0.) {
                /* eliminate column k below the pivot */
                for (j = k + 1; j < M; ++j) {
                    c1 = j * N;
                    mult = A[c1 + k] /= ld;
                    for (l = k + 1; l < N; ++l) {
                        A[c1 + l] -= mult * A[c2 + l];
                    }
                }
            }
        }
    }
    else {
        pludecomp(A, N, ipiv);
    }
    return 0;
}
void getPLU(double *A, int M , int N, int *ipiv,double *P, double *L, double *U) {
int i, j,k;
// Initialize all the arrays
// P - M*M
// M > N
// L - M*N , U - N*N
// M = N
// L - M*M , U M*M
// M < N
// L - M*M, U - M*N
if (P) {
for (i = 0; i < M*M; ++i) {
P[i] = 0.0;
}
for (j = 0; j < M; ++j) {
P[ipiv[j]*M + j] = 1.0;
}
}
if (M == N) {
if (L) {
for (i = 0; i < M; ++i) {
for (j = 0; j < i; ++j) {
L[i*M + j] = A[i*M + j];
}
L[i*M + i] = 1.0;
for (j = i + 1; j < M; ++j) {
L[i*M + j] = 0.0;
}
}
}
if (U) {
for (i = 0; i < M; ++i) {
for (j = 0; j < i; ++j) {
U[i*M + j] = 0.0;
}
for (j = i; j < M; ++j) {
U[i*M + j] = A[i*M + j];
}
}
}
}
else if (M > N) {
if (L) {
for (i = 0; i < N; ++i) {
for (j = 0; j < i; ++j) {
L[i*N + j] = A[i*N + j];
}
L[i*N + i] = 1.0;
for (j = i + 1; j < N; ++j) {
L[i*N + j] = 0.0;
}
}
memcpy(L + N*N, A + N*N, sizeof(double)*(M - N)*N);
}
if (U) {
for (i = 0; i < N; ++i) {
for (j = 0; j < i; ++j) {
U[i*N + j] = 0.0;
}
for (j = i; j < N; ++j) {
U[i*N + j] = A[i*N + j];
}
}
}
}
else if (M < N) {
if (L) {
for (i = 0; i < M; ++i) {
for (j = 0; j < i; ++j) {
L[i*M + j] = A[i*N + j];
}
L[i*M + i] = 1.0;
for (j = i + 1; j < M; ++j) {
L[i*M + j] = 0.0;
}
}
}
if (U) {
for (i = 0; i < M; ++i) {
for (j = 0; j < i; ++j) {
U[i*N + j] = 0.0;
}
for (j = i; j < N; ++j) {
U[i*N + j] = A[i*N + j];
}
}
}
}
}
void getPU(double *A, int M, int N, int *ipiv, double *P,double *U) {
int i,j,K;
int *ipivt;
double *L;
ipivt = (int*)malloc(sizeof(int)*M);
for (i = 0; i < M; ++i) {
ipivt[ipiv[i]] = i;
}
if (M > N) {
K = N;
}
else {
K = M;
}
L = (double*)malloc(sizeof(double)*M*K);
if (M == N) {
if (L) {
for (i = 0; i < M; ++i) {
for (j = 0; j < i; ++j) {
L[i*M + j] = A[i*M + j];
}
L[i*M + i] = 1.0;
for (j = i + 1; j < M; ++j) {
L[i*M + j] = 0.0;
}
}
}
if (U) {
for (i = 0; i < M; ++i) {
for (j = 0; j < i; ++j) {
U[i*M + j] = 0.0;
}
for (j = i; j < M; ++j) {
U[i*M + j] = A[i*M + j];
}
}
}
}
else if (M > N) {
if (L) {
for (i = 0; i < N; ++i) {
for (j = 0; j < i; ++j) {
L[i*N + j] = A[i*N + j];
}
L[i*N + i] = 1.0;
for (j = i + 1; j < N; ++j) {
L[i*N + j] = 0.0;
}
}
memcpy(L + N*N, A + N*N, sizeof(double)*(M - N)*N);
}
if (U) {
for (i = 0; i < N; ++i) {
for (j = 0; j < i; ++j) {
U[i*N + j] = 0.0;
}
for (j = i; j < N; ++j) {
U[i*N + j] = A[i*N + j];
}
}
}
}
else if (M < N) {
if (L) {
for (i = 0; i < M; ++i) {
for (j = 0; j < i; ++j) {
L[i*M + j] = A[i*N + j];
}
L[i*M + i] = 1.0;
for (j = i + 1; j < M; ++j) {
L[i*M + j] = 0.0;
}
}
}
if (U) {
for (i = 0; i < M; ++i) {
for (j = 0; j < i; ++j) {
U[i*N + j] = 0.0;
}
for (j = i; j < N; ++j) {
U[i*N + j] = A[i*N + j];
}
}
}
}
for (i = 0; i < M; ++i) {
memcpy(P + i*K, L + ipivt[i] * K, sizeof(double)*K);
}
free(ipivt);
free(L);
}
/* Fill values[0..N-1] with samples from N(average, deviation^2) using the
   Marsaglia polar (Box-Muller) method on top of rand().  Samples come in
   pairs; an odd N draws one extra pair and keeps only its first sample.
   Returns 'values' for convenience. */
double* marsaglia_generate(double *values, int N, double average, double deviation)
{
    int k;
    double u, v, s, r;
    for (k = 0; k + 1 < N; k += 2) {
        /* draw (u, v) uniformly in the unit disk, excluding the origin */
        do {
            u = 2.0 * rand() / (double)RAND_MAX - 1.0;
            v = 2.0 * rand() / (double)RAND_MAX - 1.0;
            s = u * u + v * v;
        } while (s >= 1. || s == 0.);
        r = sqrt(-2.0 * log(s) / s);
        values[k] = u * r;
        values[k + 1] = v * r;
    }
    if (N % 2 != 0) {
        do {
            u = 2.0 * rand() / (double)RAND_MAX - 1.0;
            v = 2.0 * rand() / (double)RAND_MAX - 1.0;
            s = u * u + v * v;
        } while (s >= 1. || s == 0.);
        r = sqrt(-2.0 * log(s) / s);
        values[N - 1] = u * r;
    }
    /* shift/scale the standard normals to the requested distribution */
    for (k = 0; k < N; ++k) {
        values[k] = (values[k] * deviation + average);
    }
    return values;
}
/* Fill A (M x N) with iid standard-normal samples. */
void random_matrix(double *A, int M, int N) {
    marsaglia_generate(A, M * N, 0.0, 1.0);
}
void linsolve(double *A,int N,double *b,int *ipiv,double *x) {
int i,j,c1,l;
double *y;
double sum;
y = (double*) malloc(sizeof(double) *N);
/*
* Two step Solution L * U * x = b
* Let U*x = y
* Solve L * y = b for y (Forward Substitution
* Solve U * x = b for x (Back Substitution)
*/
for(i = 0; i < N;++i) {
y[i] = 0.;
x[i] = 0.;
if ( A[i*N + i] == 0.) {
printf("The Matrix system does not have a unique solution");
exit(1);
}
//printf("\n B %d",ipiv[i]);
}
// Forward Substitution
y[0] = b[ipiv[0]];
for(i = 1; i < N; ++i) {
sum = 0.;
c1 = i*N;
for(j = 0; j < i; ++j) {
sum += y[j] * A[c1 + j];
}
y[i] = b[ipiv[i]] - sum;
}
// Back Substitution
x[N - 1] = y[N - 1]/A[N * N - 1];
for (i = N - 2; i >= 0; i--) {
sum = 0.;
c1 = i*(N+1);
l=0;
for(j = i+1; j < N;j++) {
l++;
sum += A[c1 + l] * x[j];
}
x[i] = (y[i] - sum) / A[c1];
}
free(y);
}
/* inv = A^{-1}, built column by column: solve A x = e_i for each unit
   vector e_i using the LU factors already stored in A (see ludecomp). */
void minverse(double *A, int N, int *ipiv, double *inv) {
    int i, j;
    double *e = (double*) malloc(sizeof(double) * N);
    double *x = (double*) malloc(sizeof(double) * N);
    for (i = 0; i < N; ++i) {
        e[i] = 0.0;
        x[i] = 0.0;
    }
    for (i = 0; i < N; ++i) {
        e[i] = 1.0;
        linsolve(A, N, e, ipiv, x);
        /* x is column i of the inverse */
        for (j = 0; j < N; ++j) {
            inv[j * N + i] = x[j];
        }
        e[i] = 0.0;
    }
    free(x);
    free(e);
}
/* mat = N x N identity matrix. */
void eye(double *mat, int N) {
    int r, c;
    for (r = 0; r < N; ++r) {
        for (c = 0; c < N; ++c) {
            mat[r * N + c] = (r == c) ? 1.0 : 0.0;
        }
    }
}
/* mat = lambda * I (N x N scaled identity). */
void eye_scale(double *mat, int N, double lambda) {
    int r, c;
    for (r = 0; r < N; ++r) {
        for (c = 0; c < N; ++c) {
            mat[r * N + c] = (r == c) ? lambda : 0.0;
        }
    }
}
/* Householder vector (Golub & Van Loan, alg. 5.1.1): given x (length N),
   compute v (with v[0] normalized to 1) and return beta such that
   (I - beta*v*v')*x annihilates x[1..N-1].  beta = 0 when x is already a
   non-negative multiple of e1, and -2 for a negative multiple.
   Fix: the single-element heap buffer for the dot product (malloc/free per
   call) is replaced by a stack scalar passed to mmult. */
static double house_1(double *x, int N, double *v) {
    double beta, mu, scale_v;
    double sigma;      /* sum of squares of x[1..N-1] */
    int i;
    if (N > 1) {
        mmult(x + 1, x + 1, &sigma, 1, N - 1, 1);
    } else {
        sigma = 0.0;
    }
    v[0] = 1.;
    for (i = 1; i < N; ++i) {
        v[i] = x[i];
    }
    if (sigma == 0. && x[0] >= 0.) {
        beta = 0.;
    } else if (sigma == 0. && x[0] < 0.) {
        beta = -2.;
    } else {
        mu = sqrt(sigma + x[0] * x[0]);   /* mu = ||x||_2 */
        /* choose the numerically stable expression for v[0] */
        if (x[0] <= 0.) {
            v[0] = x[0] - mu;
        } else {
            v[0] = - sigma / (x[0] + mu);
        }
        scale_v = v[0];
        beta = (2.0 * v[0] * v[0]) / (sigma + v[0] * v[0]);
        /* normalize so v[0] == 1 */
        for (i = 0; i < N; ++i) {
            v[i] /= scale_v;
        }
    }
    return beta;
}
double house_2(double*x,int N,double *v) {
double sgn,beta,sc;
double *sigma,*e;
int i;
sigma = (double*) malloc(sizeof(double) * 1);
e = (double*) malloc(sizeof(double) * N);
beta = 2.0;
sgn = 1.0;
mmult(x,x,sigma,1,N,1);
sigma[0] = sqrt(sigma[0]);
e[0] =1.;
for (i = 1; i < N;++i) {
e[i] = 0.;
}
if (x[0] > 0.) {
sgn = 1.0;
} else if (x[0] < 0.) {
sgn = -1.0;
} else if (x[0] == 0.) {
sgn = 0.;
}
sc = sigma[0] * sgn;
//scale(e,N,1,sc);
e[0] *= sc;
for(i = 0; i < N;++i) {
v[i] = e[i] + x[i];
}
mmult(v,v,sigma,1,N,1);
sigma[0] = sqrt(sigma[0]);
for(i = 0; i < N;++i) {
v[i] = v[i] / sigma[0];
}
free(sigma);
free(e);
return beta;
}
/* Public Householder entry point: delegates to the Golub-Van Loan variant. */
double house(double *x, int N, double *v) {
    return house_1(x, N, v);
}
/* mat = I - beta * v * v' (explicit N x N Householder reflector). */
void housemat(double *v, int N, double beta, double *mat) {
    double *outer = (double*) malloc(sizeof(double) * N * N);
    eye(mat, N);
    mmult(v, v, outer, N, 1, N);   /* outer = v * v' */
    scale(outer, N, N, beta);
    msub(mat, outer, mat, N, N);
    free(outer);
}
/* Householder reduction of a real symmetric N x N matrix to tridiagonal
   form (adapted from Numerical Recipes tred2).  On return d holds the
   diagonal, e the sub-diagonal (e[0] = 0), and 'a' is overwritten with the
   orthogonal transformation matrix Q for use by tqli.
   Order-sensitive Numerical Recipes code — kept byte-identical. */
static void tred2(double *a, int N, double *d, double *e) {
    // Modified version of Numerical recipes tred2 alogorithm
    int l, k, j, i;
    double scale, hh, h, g, f;
    for (i = N - 1; i > 0; --i) {
        l = i - 1;
        h = scale = 0.0;
        if (l > 0) {
            /* scale the row to avoid under/overflow in the norm */
            for (k = 0; k <= l; ++k) {
                scale +=(double) fabs(a[i*N + k]);
            }
            if (scale == 0.0) {
                /* row already in tridiagonal form: skip the transformation */
                e[i] = a[i*N + l];
            }
            else {
                for (k = 0; k <= l; ++k) {
                    a[i*N + k] /= scale;
                    h += a[i*N + k] * a[i*N + k];
                }
                f = a[i*N + l];
                /* sign chosen to avoid cancellation */
                g = (double) (f >= 0.0 ? -sqrt(h) : sqrt(h));
                e[i] = scale*g;
                h -= f*g;
                a[i*N + l] = f - g;   /* store the Householder vector */
                f = 0.0;
                for (j = 0; j <= l; ++j) {
                    a[j*N + i] = a[i*N + j] / h;
                    g = 0.0;
                    /* g = (A * u)[j], using symmetry of A */
                    for (k = 0; k <= j; ++k) {
                        g += a[j*N + k] * a[i*N + k];
                    }
                    for (k = j + 1; k <= l; ++k) {
                        g += a[k*N + j] * a[i*N + k];
                    }
                    e[j] = g / h;
                    f += e[j] * a[i*N + j];
                }
                hh = f / (h + h);
                /* rank-2 update A = A - u*q' - q*u' */
                for (j = 0; j <= l; ++j) {
                    f = a[i*N + j];
                    e[j] = g = e[j] - hh*f;
                    for (k = 0; k <= j; ++k) {
                        a[j*N + k] -= (f*e[k] + g*a[i*N + k]);
                    }
                }
            }
        }
        else {
            e[i] = a[i*N + l];
        }
        d[i] = h;
    }
    d[0] = 0.0;
    e[0] = 0.0;
    /* accumulate the transformation matrix Q in 'a' */
    for (i = 0; i < N; ++i) {
        l = i - 1;
        if (d[i]) {
            for (j = 0; j <= l; ++j) {
                g = 0.0;
                for (k = 0; k <= l; ++k) {
                    g += a[i*N+k] * a[k*N+j];
                }
                for (k = 0; k <= l; ++k) {
                    a[k*N + j] -= g*a[k*N+i];
                }
            }
        }
        d[i] = a[i*N+i];
        a[i*N+i] = 1.0;
        for (j = 0; j <= l; ++j) {
            a[j*N+i] = a[i*N+j] = 0.0;
        }
    }
}
/* sqrt(a*a + b*b) computed without intermediate overflow/underflow by
   factoring out the larger magnitude. */
static double pythag(double a, double b) {
    double big = (double) fabs(a);
    double small_ = (double) fabs(b);
    if (big < small_) {
        double t = big;
        big = small_;
        small_ = t;
    }
    if (big == 0.0) {
        return 0.0;
    }
    double q = small_ / big;
    return big * sqrt(1.0 + q * q);
}
/* QL algorithm with implicit shifts (Numerical Recipes tqli): diagonalize a
   tridiagonal matrix given by diagonal d and sub-diagonal e.  On return d
   holds the eigenvalues and the columns of z (seeded with Q from tred2) the
   eigenvectors.
   NOTE(review): exceeding 30 iterations only prints a warning and keeps
   iterating; and the 'r == 0.0 && i >= l' test reads r/i from the last inner
   iteration, matching the NR original.  Order-sensitive — kept byte-identical. */
static void tqli(double *d, int N, double *e, double *z) {
    int m, l, iter, i, k;
    double s, r, p, g, f, dd, c, b;
    /* renumber e so e[0..N-2] are the sub-diagonal entries */
    for (i = 1; i < N; ++i) {
        e[i - 1] = e[i];
    }
    e[N - 1] = 0;
    for (l = 0; l < N; ++l) {
        iter = 0;
        do {
            /* find a negligible sub-diagonal element to split the matrix */
            for (m = l; m < N - 1; ++m) {
                dd =(double) fabs(d[m]) + fabs(d[m + 1]);
                if ((double)(fabs(e[m]) + dd) == dd) {
                    break;
                }
            }
            if (m != l) {
                if (iter++ == 30) {
                    printf("Too many iterations in tqli");
                }
                /* implicit shift from the 2x2 leading block */
                g = (d[l + 1] - d[l]) / (2.0*e[l]);
                r = pythag(g, 1.0);
                g = d[m] - d[l] + e[l] / (g + (double) SIGN(r, g));
                s = c = 1.0;
                p = 0.0;
                /* chase the bulge from m-1 down to l with Givens rotations */
                for (i = m - 1; i >= l; --i) {
                    f = s*e[i];
                    b = c*e[i];
                    e[i + 1] = (r = pythag(f, g));
                    if (r == 0.0) {
                        d[i + 1] -= p;
                        e[m] = 0.0;
                        break;
                    }
                    s = f / r;
                    c = g / r;
                    g = d[i + 1] - p;
                    r = (d[i] - g)*s + 2.0*c*b;
                    d[i + 1] = g + (p = s*r);
                    g = c*r - b;
                    /* accumulate the rotation into the eigenvectors */
                    for (k = 0; k < N; ++k) {
                        f = z[k*N + i + 1];
                        z[k*N + i + 1] = s*z[k*N + i] + c*f;
                        z[k*N + i] = c*z[k*N + i] - s*f;
                    }
                }
                if (r == 0.0 && i >= l) continue;
                d[l] -= p;
                e[l] = g;
                e[m] = 0.0;
            }
        } while (m != l);
    }
}
/* Eigen-decomposition of the symmetric N x N matrix `mat` (row-major).
 * On return eval[0..N-1] holds the eigenvalues and the columns of evec the
 * corresponding eigenvectors; `mat` itself is left untouched. */
void eigensystem(double *mat, int N, double *eval, double *evec) {
    double *offdiag;
    offdiag = (double*)calloc(N, sizeof(double));
    memcpy(evec, mat, sizeof(double)*N*N);
    tred2(evec, N, eval, offdiag); /* reduce to tridiagonal, evec = Q */
    tqli(eval, N, offdiag, evec);  /* QL with implicit shifts         */
    free(offdiag);
}
/* Householder QR factorization of the M x N matrix A (M >= N), in the
 * compact LAPACK-style layout: on return the upper triangle of A holds R
 * and the strict lower triangle the essential parts of the Householder
 * vectors; bvec[j] stores beta for column j.  Use getQR() to expand Q/R. */
void qrdecomp(double *A, int M, int N,double *bvec) {
    int j,i,k,u,t;
    double *x,*v,*AT,*w;
    double beta;
    if (M < N) {
        printf("M should be greater than or equal to N");
        exit(1);
    }
    x = (double*) malloc(sizeof(double) * M);
    v = (double*) malloc(sizeof(double) * M);
    AT = (double*) malloc(sizeof(double) * M * N);
    w = (double*) malloc(sizeof(double) * M * M);
    for(j = 0; j < N;++j) {
        /* Copy the trailing part of column j into x. */
        for(i=j;i < M;++i) {
            x[i-j] = A[i*N+j];
        }
        beta = house(x,M-j,v);
        bvec[j] = beta;
        /* Gather the trailing submatrix A[j:M, j:N], transposed, into AT
         * (stored with leading dimension M-j). */
        for (i=j; i < M; i++) {
            t = i * N;
            u = 0;
            for (k=j; k < N; k++) {
                AT[u+i-j] = A[k+t];
                u+=(M-j);
            }
        }
        /* Apply the reflector: A <- (I - beta v v^T) A on the submatrix. */
        mmult(AT,v,w,N-j,M-j,1);  /* w  = A^T v          */
        scale(w,N-j,1,beta);      /* w  = beta A^T v     */
        mmult(v,w,AT,M-j,1,N-j);  /* AT = beta v v^T A   */
        for (i=j; i < M; i++) {
            t = i *N;
            for (k=j; k < N; k++) {
                A[t+k] -= AT[(i-j)*(N-j) + k - j];
            }
        }
        /* Store the essential Householder vector below the diagonal.
         * NOTE(review): j < M is always true here (j < N <= M). */
        if (j < M) {
            for(i=j+1;i < M;++i) {
                A[i*N+j] = v[i-j];
            }
        }
    }
    free(x);
    free(v);
    free(AT);
    free(w);
}
/* Expand the compact factorization produced by qrdecomp() into explicit
 * Q (M x N, thin) and R (N x N) matrices.  A and bvec must be exactly the
 * outputs of qrdecomp(A, M, N, bvec). */
void getQR(double *A,int M,int N,double *bvec,double *Q, double *R) {
    int i,j,k,t,u;
    double *x,*v,*AT,*w;
    /* NOTE(review): x is allocated but never used — only freed. */
    x = (double*) malloc(sizeof(double) * M);
    v = (double*) malloc(sizeof(double) * M);
    AT = (double*) malloc(sizeof(double) * M * N);
    w = (double*) malloc(sizeof(double) * M * M);
    /* R = upper triangle of (compact) A. */
    for(i = 0; i < N;++i) {
        t = i *N;
        for(j = 0; j < N;++j) {
            if (i > j) {
                R[t+j] = 0.;
            } else {
                R[t+j] = A[t+j];
            }
        }
    }
    /* Start from Q = I (thin, M x N). */
    for(i = 0; i < M;++i) {
        t = i *N;
        for(j = 0; j < N;++j) {
            if (i == j) {
                Q[t+j] = 1.;
            } else {
                Q[t+j] = 0.;
            }
        }
    }
    /* Apply the stored reflectors in reverse order: Q = H_0 ... H_{N-1} I. */
    for(j = N-1; j >= 0;--j) {
        /* Rebuild the Householder vector: implicit leading 1, rest from
         * the strict lower triangle of A. */
        v[0] = 1.;
        for(i=j+1;i < M;++i) {
            v[i-j] = A[i*N+j];
        }
        /* Gather Q[j:M, j:N] transposed into AT (leading dimension M-j). */
        for (i=j; i < M; i++) {
            t = i * N;
            u = 0;
            for (k=j; k < N; k++) {
                AT[u+i-j] = Q[k+t];
                u+=(M-j);
            }
        }
        /* Q <- (I - beta v v^T) Q on the trailing submatrix. */
        mmult(AT,v,w,N-j,M-j,1);
        scale(w,N-j,1,bvec[j]);
        mmult(v,w,AT,M-j,1,N-j);
        for (i=j; i < M; i++) {
            t = i *N;
            for (k=j; k < N; k++) {
                Q[t+k] -= AT[(i-j)*(N-j) + k - j];
            }
        }
    }
    free(x);
    free(v);
    free(AT);
    free(w);
}
/* In-place reduction of the N x N matrix A to upper Hessenberg form by a
 * sequence of Householder similarity transformations A <- P A P with
 * P = I - beta v v^T (Golub & Van Loan, Algorithm 7.4.2). */
void hessenberg(double *A,int N) {
    int k,i,j,t,u;
    double *x,*v,*AT,*w;
    double beta;
    x = (double*) malloc(sizeof(double) * N);
    v = (double*) malloc(sizeof(double) * N);
    AT = (double*) malloc(sizeof(double) * N * N);
    w = (double*) malloc(sizeof(double) * N);
    for (k = 0; k < N-2;++k) {
        /* Householder vector that annihilates A[k+2:N, k]. */
        for(i=k + 1;i < N;++i) {
            x[i-k-1] = A[i*N+k];
            //printf("x %lf \n",x[i-k-1]);
        }
        beta = house(x,N-k-1,v);
        /* Left update: A[k+1:N, k:N] <- (I - beta v v^T) A[k+1:N, k:N].
         * Gather the submatrix transposed into AT first. */
        for (i=k+1; i < N; i++) {
            t = i * N;
            u = 0;
            for (j=k; j < N; j++) {
                AT[u+i-k-1] = A[j+t];
                u+=(N-k-1);
            }
        }
        //mdisplay(AT,N-k,N-k-1);
        mmult(AT,v,w,N-k,N-k-1,1);
        scale(w,N-k,1,beta);
        mmult(v,w,AT,N-k-1,1,N-k);
        //mdisplay(AT,N-k-1,N-k);
        for (i=k+1; i < N; i++) {
            t = i * N;
            for (j=k; j < N; j++) {
                A[t+j] -= AT[(i-k-1)*(N-k) + j - k];
            }
        }
        //mdisplay(A,N,N);
        /* Right update: A[0:N, k+1:N] <- A[0:N, k+1:N] (I - beta v v^T). */
        for (i=0; i < N; i++) {
            t = i * N;
            u = i * (N-k-1);
            for (j=k+1; j < N; j++) {
                AT[u+j-k-1] = A[t+j];
            }
        }
        //mdisplay(AT,N,N-k-1);
        mmult(AT,v,w,N,N-k-1,1);
        scale(w,N,1,beta);
        mmult(w,v,AT,N,1,N-k-1);
        //mdisplay(AT,N,N-k-1);
        for (i=0; i < N; i++) {
            t = i * N;
            u = i * (N-k-1);
            for (j=k+1; j < N; j++) {
                A[t+j] -= AT[u+j-k-1];
            }
        }
    }
    free(x);
    free(v);
    free(AT);
    free(w);
}
/* One implicit double-shift Francis QR sweep on the upper Hessenberg
 * matrix A (N x N, row-major, modified in place).
 * Reference - Algorithm 7.5.1 Golub,van Loan Matrix Computations 3rd Edition
 *
 * Fixes vs. the original:
 *  - The N <= 2 early return happened AFTER four malloc() calls, leaking
 *    all four work buffers on every small-block call from francis_iter().
 *  - It also happened after x[2] = A[N]*A[N+N+1] was computed, which for
 *    N == 2 reads A[5] past the end of the 2x2 (4-element) matrix.
 *  The guard is now the first statement, before any allocation or read.
 *  - Removed the unused local `m`. */
void francisQR(double *A,int N) {
    int n,k,q,r,t,u,i,j;
    double s,t2,beta;
    double *x,*v,*AT,*w;
    int NN;
    if (N <= 2) {
        /* Nothing to do for a 1x1 or 2x2 block. */
        return;
    }
    x = (double*) malloc(sizeof(double) * 3);
    v = (double*) malloc(sizeof(double) * 3);
    AT = (double*) malloc(sizeof(double) * 3 * N);
    w = (double*) malloc(sizeof(double) * N);
    n = N-1;
    NN = N*N;
    /* Trace and determinant of the trailing 2x2 define the double shift. */
    s = A[NN-1] + A[NN-N-2];
    t2 = A[NN-1] * A[NN-N-2] - A[NN-2] * A[NN-N-1];
    /* First column of (A - s1 I)(A - s2 I), only 3 nonzero entries. */
    x[0] = A[0]*A[0] + A[1]*A[N] - s*A[0] + t2;
    x[1] = A[N]*(A[0] + A[N+1] - s);
    x[2] = A[N] * A[N+N+1];
    /* Chase the bulge down the subdiagonal with 3x3 Householder reflectors. */
    for (k = -1; k < N - 3;++k) {
        beta = house(x,3,v);
        if (k > 0) {
            q = k;
        } else {
            q = 0;
        }
        /* Left update on rows k+1..k+3, columns q..N-1. */
        for (i=k+1; i < k+4; i++) {
            t = i * N;
            u = 0;
            for (j=q; j < N; j++) {
                AT[u+i-k-1] = A[j+t];
                u+=3;
            }
        }
        mmult(AT,v,w,N-q,3,1);
        scale(w,N-q,1,beta);
        mmult(v,w,AT,3,1,N-q);
        for (i=k+1; i < k+4; i++) {
            t = i * N;
            for (j=q; j < N; j++) {
                A[t+j] -= AT[(i-k-1)*(N-q) + j - q];
            }
        }
        /* Right update touches rows 0..r-1 only (Hessenberg structure). */
        if (k+4 >= n) {
            r = N;
        } else {
            r = k+4+1;
        }
        for (i=0; i < r; i++) {
            t = i * N;
            u = i * 3;
            for (j=k+1; j < k+4; j++) {
                AT[u+j-k-1] = A[t+j];
            }
        }
        mmult(AT,v,w,r,3,1);
        scale(w,r,1,beta);
        mmult(w,v,AT,r,1,3);
        for (i=0; i < r; i++) {
            t = i * N;
            u = i * 3;
            for (j=k+1; j < k+4; j++) {
                A[t+j] -= AT[u+j-k-1];
            }
        }
        /* Next bulge column. */
        x[0] = A[N*(k+2) + k+1];
        x[1] = A[N*(k+3) + k+1];
        if (k < n-3) {
            x[2] = A[N*(k+4) + k+1];
        }
    }
    /* Final 2x2 reflector to finish the sweep. */
    beta = house(x,2,v);
    for (i=n-1; i < N; i++) {
        t = i * N;
        u = 0;
        for (j=n-2; j < N; j++) {
            AT[u+i-n+1] = A[j+t];
            u+=2;
        }
    }
    mmult(AT,v,w,3,2,1);
    scale(w,3,1,beta);
    mmult(v,w,AT,2,1,3);
    for (i=n-1; i < N; i++) {
        t = i * N;
        for (j=n-2; j < N; j++) {
            A[t+j] -= AT[(i-n+1)*3 + j - n + 2];
        }
    }
    for (i=0; i < N; i++) {
        t = i * N;
        u = i * 2;
        for (j=n-1; j < N; j++) {
            AT[u+j-n+1] = A[t+j];
        }
    }
    mmult(AT,v,w,N,2,1);
    scale(w,N,1,beta);
    mmult(w,v,AT,N,1,2);
    for (i=0; i < N; i++) {
        t = i * N;
        u = i * 2;
        for (j=n-1; j < N; j++) {
            A[t+j] -= AT[u+j-n+1];
        }
    }
    free(x);
    free(v);
    free(AT);
    free(w);
}
/* Eigenvalues of the 2x2 block {A[0],A[1]; A[stride],A[stride+1]}.
 * Real parts go to eigre[0..1], imaginary parts to eigim[0..1].
 * First rotates the block so its diagonal entries are equal, then reads
 * the eigenvalues off the rotated off-diagonal product. */
void eig22(double *A, int stride,double *eigre,double *eigim) {
    int N;
    double a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,t2,at11,at12,at21,at22;
    N = stride;
    a11 = A[0];
    a12 = A[1];
    a21 = A[N];
    a22 = A[N+1];
    /* Half-angle rotation that equalizes the diagonal of the block. */
    if ( (a12 + a21) == 0) {
        c = 1./sqrt(2.0);
        s = c;
    } else {
        t1 = (a11 - a22) / (a12 + a21);
        t = t1 /(1. + sqrt(1+t1*t1));
        c = 1./sqrt(1 + t*t);
        s = c*t;
    }
    c2 = c*c;
    s2 = s*s;
    cs = c*s;
    at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
    at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
    at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
    at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
    eigre[0] = eigre[1] = at11;
    /* NOTE(review): when at12*at21 > 0 these sqrt(-x) calls produce NaN;
     * the branch below then overwrites them, so the NaN is transient. */
    eigim[0] = sqrt(-at12 * at21);
    eigim[1] = -sqrt(-at12 * at21);
    if ( at12*at21 >= 0) {
        /* Real eigenvalue pair: a second rotation splits the diagonal. */
        if (at12 == 0) {
            c = 0;
            s = 1;
            c2 = 0;
            s2 = 1;
            cs = 0;
        } else {
            t = sqrt(at21/at12);
            t2 = t * t;
            cs = t/(1+t2);
            /* NOTE(review): c2 = (1+t2) looks like it should be
             * 1/(1+t2) (so c2+s2 == 1), but c2/s2 are dead after this
             * point — only cs is used below. */
            c2 = (1+t2);
            s2 = t2 /(1+t2);
        }
        eigim[0] = eigim[1] = 0.0;
        eigre[0] = at11 - cs * (at12 + at21);
        eigre[1] = at11 + cs * (at12 + at21);
    }
}
/* Drive the Francis QR iteration to convergence: H receives the real Schur
 * form (quasi-upper-triangular) of A.  Returns 1 on convergence within
 * 30*N iterations, 0 otherwise.  A is left untouched. */
int francis_iter(double *A, int N, double *H) {
    int success,brkpoint;
    int i,j,it,p,q,t,u;
    double *temp;
    success = 0;
    brkpoint = 30 * N;  /* iteration cap */
    it = 0;
    p = N - 1;
    temp = (double*) malloc(sizeof(double) * N * N);
    for(i = 0; i < N*N;++i) {
        H[i] = A[i];
    }
    hessenberg(H,N);
    while (p > 1 && it < brkpoint) {
        /* Deflate: shrink the active block past converged eigenvalues
         * (zero sub-diagonal => 1x1 block, zero one row up => 2x2 block). */
        while (p > 1 && (H[N*p + p-1] == 0 || H[N*(p-1) + p-2] == 0)) {
            if (H[N*p + p-1] == 0) {
                p--;
            } else if (H[N*(p-1) + p-2] == 0) {
                p=p-2;
            }
        }
        if (p > 0) {
            /* Find the top q of the active unreduced block [q..p]. */
            q = p-1;
            while (q > 0 && fabs(H[N*q + q-1]) != 0) {
                q--;
            }
            //printf("%d %d \n",q,p);
            /* Copy the active block out, run one double-shift sweep on it,
             * and copy it back. */
            for (i=q; i <= p; i++) {
                t = i * N;
                u = (i-q) * (p-q+1);
                for (j=q; j <= p; j++) {
                    temp[u+j-q] = H[t+j];
                }
            }
            francisQR(temp,p-q+1);
            for (i=q; i <= p; i++) {
                t = i * N;
                u = (i-q) * (p-q+1);
                for (j=q; j <= p; j++) {
                    H[t+j] = temp[u+j-q];
                }
            }
            //mdisplay(H,N,N);
            /* Set negligible sub-diagonal entries to exact zero. */
            for(i = q; i <= p-1;++i) {
                if ( fabs(H[(i+1)*N+i]) <= TOL * (fabs(H[i*N+i]) + fabs(H[(i+1)*N+i+1]) ) ) {
                    H[(i+1)*N+i] = 0.;
                }
            }
            it++;
            //printf("iter %d \n",it);
        }
    }
    if (it == brkpoint) {
        success = 0;
    } else {
        success = 1;
    }
    free(temp);
    return success;
}
static void eig2t(double *A, int stride) {
int N;
double a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,at11,at12,at21,at22;
N = stride;
a11 = A[0];
a12 = A[1];
a21 = A[N];
a22 = A[N+1];
if ( (a12 + a21) == 0) {
c = 1./sqrt(2.0);
s = c;
} else {
t1 = (a11 - a22) / (a12 + a21);
t = t1 /(1. + sqrt(1+t1*t1));
c = 1./sqrt(1 + t*t);
s = c*t;
}
c2 = c*c;
s2 = s*s;
cs = c*s;
at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
A[0] = at11;
A[1] = at12;
A[N] = at21;
A[N+1] = at22;
}
/* Eigenvalues of a general real N x N matrix A (row-major).
 * eigre/eigim (length N) receive the real and imaginary parts.
 * A is not modified; the Schur form is built in a private buffer. */
void eig(double *A,int N,double *eigre,double *eigim) {
    int i,t,u,n;
    double *H;
    double t1,t2,cs;
    H = (double*) malloc(sizeof(double) * N * N);
    n = N - 1;
    /* NOTE(review): convergence status of francis_iter is ignored here —
     * on failure the values below come from an unconverged H. */
    francis_iter(A,N,H);
    //mdisplay(H,N,N);
    /* Rotate every unreduced 2x2 diagonal block to equal-diagonal form. */
    i = 0;
    while (i < n) {
        u = i * N;
        t = (i+1)*N;
        if (H[t+i] != 0.) {
            eig2t(H+u+i,N);
            i = i +2;
        } else {
            i++;
        }
    }
    //mdisplay(H,N,N);
    /* Extract eigenvalues: 2x2 blocks give complex pairs or real pairs,
     * 1x1 blocks give single real eigenvalues. */
    i = 0;
    while (i < n) {
        u = i * N;
        t = (i+1)*N;
        if (H[t+i] != 0.) {
            if (H[u+i+1] * H[t+i] < 0.) {
                /* Complex conjugate pair. */
                eigre[i] = H[u+i];
                eigre[i+1] = H[t+i+1];
                eigim[i] = sqrt(-H[u+i+1] * H[t+i]);
                eigim[i+1] = -sqrt(-H[u+i+1] * H[t+i]);
            } else {
                /* Two real eigenvalues from one 2x2 block. */
                if (H[u+i+1] == 0.) {
                    cs = 0.;
                } else {
                    t1 = sqrt(H[t+i]/H[u+i+1]);
                    t2 = t1 * t1;
                    cs = t1/(1+t2);
                }
                eigre[i] = H[u+i] - cs * (H[u+i+1] + H[t+i]);
                eigre[i+1] = H[u+i] + cs * (H[u+i+1] + H[t+i]);
                eigim[i] = 0.;
                eigim[i+1] = 0.;
            }
            i= i + 2;
        } else {
            eigre[i] = H[u+i];
            eigim[i] = 0.;
            i++;
        }
    }
    /* Trailing 1x1 block, if the last 2x2 was not consumed. */
    if (i == n) {
        eigre[i] = H[N*N - 1];
        eigim[i] = 0.;
    }
    free(H);
}
/* Recursive unblocked Cholesky (upper form): factor the leading N x N
 * block of A (row-major, given stride) into U^T U, writing U over the
 * upper triangle.  U22 is caller-provided scratch of at least
 * (N-1)*(N-1) doubles.  Returns 0 on success, -1 if A is not positive
 * definite. */
static int rcholu(double *A,int N, int stride, double *U22) {
    int sc;
    int j,i,u,w;
    double u11;
    if (N == 1) {
        if (A[0] > 0) {
            A[0] = sqrt(A[0]);
            return 0;
        } else {
            return -1;
        }
    } else {
        if (A[0] < 0) {
            return -1;
        }
        u11 = sqrt(A[0]);
        A[0] = u11;
        /* First row of U: A[0,1:N] / u11. */
        for (j = 1; j < N;++j) {
            A[j] /= u11;
        }
        /* Rank-1 downdate of the trailing block: A22 -= u12^T u12. */
        mmult(A+1,A+1,U22,N-1,1,N-1);
        for (i = 0; i < N-1; ++i) {
            u = stride + 1+ i * stride;
            w = i * (N-1);
            for(j = i; j < N-1;j++) {
                A[j + u] -= U22[j + w];
            }
        }
        /* Recurse on the trailing (N-1) x (N-1) block. */
        sc = rcholu(A+stride+1,N-1,stride,U22);
        if (sc == -1) {
            return -1;
        }
    }
    return sc;
}
/* Recursive blocked Cholesky (upper form) with block size BLOCKSIZE.
 * UB is scratch for the unblocked kernel (>= BLOCKSIZE^2 doubles), UT is
 * N*N scratch.  Returns 0 on success, -1 if A is not positive definite.
 *
 * Fix vs. the original: the return code of the rcholu() call that factors
 * the leading bs x bs block was ignored, so an indefinite leading block
 * went undetected and its garbage propagated through the triangular
 * solves below.  The status is now checked (with the four temporaries
 * freed on the failure path). */
static int rbcholu(double *A,int N, int stride, double *UB, double *UT) {
    int bs,bb,i,j,Nb,t,k,u,v,w,sc;
    double *b,*x,*U12,*U12T;
    double sum;
    bs = (int) BLOCKSIZE;
    bb = bs*bs;
    if (N <= BLOCKSIZE) {
        /* Small enough: fall back to the unblocked kernel. */
        sc = rcholu(A,N,stride,UB);
        if (sc == -1) {
            return -1;
        }
    } else {
        Nb = N - bs;
        x = (double*) malloc(sizeof(double) * bs);
        b = (double*) malloc(sizeof(double) * bs);
        U12T = (double*) malloc(sizeof(double) * Nb * bs);
        U12 = (double*) malloc(sizeof(double) * Nb * bs);
        sc = rcholu(A,bs,stride,UB); // U11
        if (sc == -1) {
            free(U12T);
            free(U12);
            free(b);
            free(x);
            return -1;
        }
        /* Transpose the first bs rows of A into UT (column gather). */
        for (i =0; i < bs;++i) {
            t = i *stride;
            u = 0;
            for(j = 0; j < N;++j) {
                UT[u+i] = A[j+t];
                u += bs;
            }
        }
        /* Solve U11^T U12 = A12 column by column (forward substitution). */
        for(k = 0; k < Nb;++k) {
            u = k * bs;
            for(i = 0; i < bs;++i) {
                b[i] = UT[bb+u+i];
                x[i] = 0.;
            }
            for (i = 0; i < bs;++i) {
                t = i*bs;
                sum = 0;
                for (j = 0; j < i;++j) {
                    sum += UT[t+j] * x[j];
                }
                x[i] = (b[i] - sum) / UT[t+i];
            }
            /* Scatter the solved column back into A and keep a copy. */
            v = bs + k;
            for(i = 0; i < bs;++i) {
                A[v] = x[i];
                U12T[u+i] = x[i];
                v += stride;
            }
        }
        /* Downdate the trailing block: A22 -= U12^T U12. */
        mtranspose(U12T,Nb,bs,U12);
        mmult(U12T,U12,UT,Nb,bs,Nb);
        free(U12T);
        free(U12);
        free(b);
        free(x);
        for (i = 0; i < Nb; ++i) {
            u = bs * stride + bs + i * stride;
            w = i * Nb;
            for(j = i; j < Nb;j++) {
                A[j + u] -= UT[j + w];
            }
        }
        /* Recurse on the trailing Nb x Nb block. */
        sc = rbcholu(A + bs * stride + bs,Nb,stride,UB,UT);
        if (sc == -1) {
            return -1;
        }
    }
    return sc;
}
/* Unblocked Cholesky of the N x N matrix A (row-major): on success the
 * upper triangle holds U with A = U^T U and the strict lower triangle is
 * zeroed.  Returns 0 on success, -1 if A is not positive definite. */
int cholu(double *A, int N) {
    int row, col, base, status;
    double *scratch;
    scratch = (double*) malloc(sizeof(double) * N * N);
    status = rcholu(A, N, N, scratch);
    /* Clear the strict lower triangle left behind by the recursion. */
    for (row = 0; row < N; ++row) {
        base = row * N;
        for (col = 0; col < row; ++col) {
            A[base + col] = 0.;
        }
    }
    free(scratch);
    return status;
}
/* Blocked Cholesky of the N x N matrix A (row-major): same contract as
 * cholu() but uses the BLOCKSIZE-blocked recursion for large N.
 * Returns 0 on success, -1 if A is not positive definite. */
int bcholu(double *A, int N) {
    int row, col, base, blk, status;
    double *diagbuf, *workbuf;
    blk = (int) BLOCKSIZE;
    workbuf = (double*) malloc(sizeof(double) * N * N);
    diagbuf = (double*) malloc(sizeof(double) * blk * blk);
    status = rbcholu(A, N, N, diagbuf, workbuf);
    /* Clear the strict lower triangle left behind by the recursion. */
    for (row = 0; row < N; ++row) {
        base = row * N;
        for (col = 0; col < row; ++col) {
            A[base + col] = 0.;
        }
    }
    free(diagbuf);
    free(workbuf);
    return status;
}
/* Cholesky front end: dispatch to the unblocked routine for small
 * matrices and the blocked one otherwise.  Returns 0 on success, -1 if A
 * is not positive definite. */
int chol(double *A, int N) {
    if (N <= (int) BLOCKSIZE) {
        return cholu(A, N);
    }
    return bcholu(A, N);
}
/* Recursive LDL^T-style (square-root-free) factorization of the leading
 * N x N block of A (row-major, given stride): the diagonal keeps D and the
 * strict upper triangle the unit-triangular factor's off-diagonals.
 * U22 is caller scratch of at least (N-1)*(N-1) doubles.  Unlike rcholu
 * there is no positivity check — a zero pivot divides by zero. */
static void rchold(double *A,int N, int stride, double *U22) {
    int j,i,u,w;
    double d1;
    if (N == 1) {
        return;
    } else {
        d1 = A[0];  /* pivot d1 stays in place */
        for (j = 1; j < N;++j) {
            A[j] /= d1;
        }
        /* Downdate trailing block: A22 -= d1 * u12^T u12. */
        mmult(A+1,A+1,U22,N-1,1,N-1);
        scale(U22,N-1,N-1,d1);
        for (i = 0; i < N-1; ++i) {
            u = stride + 1+ i * stride;
            w = i * (N-1);
            for(j = i; j < N-1;j++) {
                A[j + u] -= U22[j + w];
            }
        }
        rchold(A+stride+1,N-1,stride,U22);
    }
}
/* Square-root-free (LDL^T-style) factorization of the N x N matrix A
 * (row-major): the diagonal holds D, the upper triangle the unit factor,
 * and the strict lower triangle is zeroed. */
void chold(double *A, int N) {
    int row, col, base;
    double *scratch;
    scratch = (double*) malloc(sizeof(double) * N * N);
    rchold(A, N, N, scratch);
    /* Clear the strict lower triangle left behind by the recursion. */
    for (row = 0; row < N; ++row) {
        base = row * N;
        for (col = 0; col < row; ++col) {
            A[base + col] = 0.;
        }
    }
    free(scratch);
}
/* Reorder the SVD factors so that the singular values in q are sorted
 * (order defined by the project routine sort1d), permuting the columns of
 * U (M x N) and V (N x N) consistently. */
void svd_sort(double *U,int M,int N,double *V,double *q) {
    /*
     * Pavel Sakov's CSA SVD sort routine is used with some minor
     * modifications. See The License below
     */
    /*
     * Copyright (C) 2000-2008 Pavel Sakov and CSIRO
      Redistribution and use of material from the package `csa', with or without
      modification, are permitted provided that the following conditions are
      met:
      1. Redistributions of material must retain the above copyright notice, this
      list of conditions and the following disclaimer.
      2. The names of the authors may not be used to endorse or promote products
      derived from this software without specific prior written permission.
      THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR IMPLIED
      WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
      MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
      EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
      EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
      OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
      INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
      CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
      IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
      OF SUCH DAMAGE.
     */
    int i,j;
    double *UT,*VT,*qq;
    int *pos;
    /* Working copies: permuting in place column-by-column would clobber
     * source columns before they are read. */
    UT = (double*) malloc(sizeof(double) * N * M);
    VT = (double*) malloc(sizeof(double) * N * N);
    qq = (double*) malloc(sizeof(double) * N);
    pos = (int*) malloc(sizeof(int) * N);
    for(i = 0;i < N;++i) {
        qq[i] = q[i];
    }
    for(i = 0;i < M*N;++i) {
        UT[i] = U[i];
    }
    for(i = 0;i < N*N;++i) {
        VT[i] = V[i];
    }
    //mtranspose(U,M,N,UT);
    //mtranspose(V,N,N,VT);
    /* sort1d sorts q and records the permutation in pos. */
    sort1d(q,N,pos);
    for(i = 0; i < N;++i) {
        q[i] = qq[pos[i]];
        for (j = 0; j < M;++j) {
            U[j*N+i] = UT[j*N+pos[i]];
        }
        for (j = 0; j < N;++j) {
            V[j*N+i] = VT[j*N+pos[i]];
        }
    }
    free(UT);
    free(VT);
    free(qq);
    free(pos);
}
/* Golub-Reinsch SVD: A (M x N, M >= N, row-major) = U diag(q) V^T with
 * U (M x N), V (N x N), q (N).  A is not modified.  Returns 0 on success,
 * -1 if M < N, 15 if the QR iteration fails to converge. */
int svd(double *A,int M,int N,double *U,double *V,double *q) {
    int i,j,k,l,t,t2,ierr,cancel,iter,l1;
    double eps,g,x,s,temp,f,h,c,y,z,scale;
    double *e;
    /*
     THIS SUBROUTINE IS THE MODIFIED C TRANSLATION OF THE
     EISPACK FORTRAN TRANSLATION OF THE ALGOL PROCEDURE SVD,
     NUM. MATH. 14, 403-420(1970) BY GOLUB AND REINSCH.
     HANDBOOK FOR AUTO. COMP., VOL II-LINEAR ALGEBRA, 134-151(1971).
     */
    /*
     * U = MXN
     * V - NXN
     * Q - NX1
     */
    /*
     * The program return error codes
     *
     * Code 0 if the computation is successful
     * Code -1 If M < N . Transpose the matrix such that rows > columns and try again
     * Code 15 if maximum iterations are reached without achieving convergence. Increase SVDMAXITER value
     * in matrix.h header file. Default Value is 50
     *
     */
    if (M < N) {
        printf("Rows (M) should be greater than Columns (B) \n");
        printf("Retry By Transposing the Input Matrix");
        return -1;
    }
    e = (double*) malloc(sizeof(double) * N);
    ierr = 0;
    eps = macheps();
    g = scale = x = 0.0;
    for(i = 0; i < M*N;++i) {
        U[i] = A[i];
    }
    /* --- Householder bidiagonalization: q gets the diagonal, e the
     * super-diagonal; the transformations are stored in U. --- */
    for(i = 0; i < N;++i) {
        l = i+1;
        e[i] = scale * g;
        g = 0.0;
        s = 0.0;
        scale = 0.0;
        if (i < M) {
            /* NOTE(review): this scale loop runs k < M over row-indexed
             * data U[k*N+i] — column i of U; looks intended and in-bounds. */
            for(k = i; k < M;++k) {
                scale += fabs(U[k*N+i]);
            }
            if (scale != 0.0) {
                for(k = i; k < M;++k) {
                    t = k * N;
                    U[t+i] /= scale;
                    temp = U[t+i];
                    s += temp*temp;
                }
                f = U[i*N+i];
                g = (f < 0) ? sqrt(s) : -sqrt(s);
                h = f * g - s;
                U[i*N+i] = f - g;
                if (i < N - 1) {
                    /* Apply the column reflector to the remaining columns. */
                    for(j = l; j < N;++j) {
                        s = 0.0;
                        for(k = i; k < M;++k) {
                            t = k * N;
                            s += U[t+i]*U[t+j];
                        }
                        f = s / h;
                        for(k = i; k < M;++k) {
                            t = k * N;
                            U[t+j] += f * U[t+i];
                        }
                    }
                }
                for(k = i; k < M;++k) {
                    t = k * N;
                    U[t+i] *= scale;
                }
            }
        }
        q[i] = scale * g;
        g = 0.0;
        s = 0.0;
        scale = 0.0;
        if (i < M && i != N - 1) {
            t = i *N;
            /* NOTE(review): upper bound M here (not N) sums past column
             * N-1 of row i; scale cancels mathematically but the EISPACK
             * original uses N — confirm. */
            for(k = l; k < M;++k) {
                scale += fabs(U[t+k]);
            }
            if (scale != 0.0) {
                for(k = l; k < N;++k) {
                    U[t+k] /= scale;
                    temp = U[t+k];
                    s = s + temp*temp;
                }
                f = U[t+l];
                g = (f < 0) ? sqrt(s) : -sqrt(s);
                h = f * g - s;
                U[t+l] = f - g;
                for(k = l;k < N;++k) {
                    e[k] = U[t+k] / h;
                }
                /* Apply the row reflector to the remaining rows. */
                for (j = l; j < M; j++) {
                    s = 0.0;
                    t2 = j * N;
                    for (k = l; k < N; k++) {
                        s += U[t2+k] * U[t+k];
                    }
                    for (k = l; k < N; k++) {
                        U[t2+k] += s * e[k];
                    }
                }
                for (k = l; k < N; k++)
                    U[t+k] *= scale;
            }
        }
        /* Track the largest |q[i]| + |e[i]| for the convergence threshold. */
        temp = fabs(q[i]) + fabs(e[i]);
        if (x < temp) {
            x = temp;
        }
    }
    /*
     ierr = 0;
     eps = macheps();
     tol = eps;
     g = x = 0.0;
     for(i = 0; i < M*N;++i) {
     U[i] = A[i];
     }
     for(i = 0; i < N;++i) {
     l = i+1;
     e[i] = g;
     s = 0.0;
     for(k = i; k < M;++k) {
     t = k * N;
     temp = U[t+i];
     s += temp*temp;
     }
     if (s < tol) {
     g = 0.0;
     } else {
     f = U[i*N+i];
     g = (f < 0) ? sqrt(s) : -sqrt(s);
     h = f * g - s;
     U[i*N+i] = f - g;
     for(j = l; j < N;++j) {
     s = 0.0;
     for(k = i; k < M;++k) {
     t = k * N;
     s += (U[t+i]*U[t+j]);
     }
     f = s / h;
     for(k = i; k < M;++k) {
     t = k * N;
     U[t+j] += (f * U[t+i]);
     }
     }
     }
     q[i] = g;
     s = 0.0;
     t = i * N;
     for(k = l; k < N;++k) {
     temp = U[t+k];
     s = s + temp*temp;
     }
     if (s < tol) {
     g = 0.0;
     } else {
     f = U[t+l];
     g = (f < 0) ? sqrt(s) : -sqrt(s);
     h = f * g - s;
     U[t+l] = f - g;
     for(k = l;k < N;++k) {
     e[k] = U[t+k] / h;
     }
     for (j = l; j < M; j++) {
     s = 0.0;
     t2 = j * N;
     for (k = l; k < N; k++) {
     s += U[t2+k] * U[t+k];
     }
     for (k = l; k < N; k++) {
     U[t2+k] += s * e[k];
     }
     }
     }
     temp = fabs(q[i]) + fabs(e[i]);
     if (x < temp) {
     x = temp;
     }
     }
     */
    //Accumulating Right Hand Transformations
    for(i = N - 1;i >= 0;--i) {
        t = i * N;
        if (i < N - 1) {
            if (g != 0.0) {
                h = U[t+i+1] * g;
                for(j = l;j < N;++j) {
                    V[j*N+i] = U[t+j] / h;
                }
                for(j = l;j < N;++j) {
                    s = 0.0;
                    for(k = l; k < N;++k) {
                        s += U[t+k] * V[k*N+j];
                    }
                    for(k = l; k < N;++k) {
                        V[k*N+j] += (s * V[k*N+i]);
                    }
                }
            }
            for(j = l; j < N;++j) {
                V[t+j] = V[j*N+i] = 0.0;
            }
        }
        V[t+i] = 1.0;
        g = e[i];
        l = i;
    }
    //Accumulating Left Hand Transformations
    for(i = N - 1;i >= 0;--i) {
        t = i * N;
        l = i+1;
        g = q[i];
        if (i < N - 1) {
            for(j = l;j < N;++j) {
                U[t+j] = 0.0;
            }
        }
        if (g != 0.0) {
            if (i != N - 1) {
                //h = U[t+i] * g;
                for(j = l;j < N;++j) {
                    s = 0.0;
                    for(k = l; k < M;++k) {
                        s += (U[k*N+i] * U[k*N+j]);
                    }
                    f = (s / U[t+i]) / g;
                    for(k = i; k < M;++k) {
                        U[k*N+j] += (f * U[k*N+i]);
                    }
                }
            }
            for(j = i; j < M;++j) {
                U[j*N+i] = U[j*N+i] / g;
            }
        } else {
            for(j = i; j < M;++j) {
                U[j*N+i] = 0.0;
            }
        }
        U[t+i] += 1.0;
    }
    // mdisplay(U,M,N);
    /* --- Diagonalize the bidiagonal form by implicit-shift QR. --- */
    eps = eps * x;
    for(k = N - 1; k >= 0; --k) {
        iter = 0;
        while(1) {
            iter++;
            if (iter > SVDMAXITER) {
                /* NOTE(review): this path leaks `e` (no free before
                 * return) — the only early return after the malloc. */
                printf("Convergence Not Achieved \n");
                return 15;
            }
            cancel = 1;
            /* Test for splitting.  NOTE(review): q[l-1] is only read when
             * l >= 1 in practice because e[0] == 0 always triggers the
             * first break at l == 0. */
            for(l = k; l >= 0; --l) {
                if (fabs(e[l]) <= eps) {
                    cancel = 0; //test f convergence
                    break;
                }
                if (fabs(q[l-1]) <= eps) {
                    //Cancel
                    break;
                }
            }
            if (cancel) {
                /* Cancellation of e[l] if q[l-1] is negligible. */
                c = 0.0;
                s = 1.0;
                l1 = l - 1;
                for(i = l; i <= k;++i) {
                    f = s*e[i];
                    e[i] *= c;
                    if (fabs(f) <= eps) {
                        break;
                    }
                    g = q[i];
                    h = q[i] = hypot(f,g);
                    c = g/h;
                    s = -f/h;
                    for(j = 0; j < M;++j) {
                        t = j * N;
                        y = U[t+l1];
                        z = U[t+i];
                        U[t+l1] = y * c + z * s;
                        U[t+i] = z * c - y * s;
                    }
                }
            }
            z = q[k];
            if (l != k) {
                /* Shift from the bottom 2x2 minor. */
                x = q[l];
                y = q[k-1];
                g = e[k-1];
                h = e[k];
                f = 0.5 * (((g + z) / h) * ((g - z) / y) + y / h - h / y);
                g = hypot(f,1.0);
                if (f < 0.0) {
                    temp = f - g;
                } else {
                    temp = f+g;
                }
                f = x - (z / x) * z + (h / x) * (y / temp - h);
                //Next QR Transformation
                c = s = 1.0;
                for(i = l+1; i <= k;++i) {
                    g = e[i];
                    y = q[i];
                    h = s * g;
                    g = c * g;
                    e[i-1] = z = hypot(f,h);
                    c = f / z;
                    s = h / z;
                    f = x * c + g * s;
                    g = g * c - x * s;
                    h = y * s;
                    y *= c;
                    for(j = 0; j < N;++j) {
                        t = j * N;
                        x = V[t+i-1];
                        z = V[t+i];
                        V[t+i-1] = x * c + z * s;
                        V[t+i] = z * c - x * s;
                    }
                    q[i-1] = z = hypot(f,h);
                    if (z != 0.0) {
                        c = f / z;
                        s = h / z;
                    }
                    f = c * g + s * y;
                    x = c * y - s * g;
                    for(j = 0; j < M;++j) {
                        t = j * N;
                        y = U[t+i-1];
                        z = U[t+i];
                        U[t+i-1] = y * c + z * s;
                        U[t+i] = z * c - y * s;
                    }
                }
                e[l] = 0.0;
                e[k] = f;
                q[k] = x;
            } else {
                //convergence
                if (z < 0.0) {
                    /* Make the singular value non-negative. */
                    q[k] = -z;
                    for (j = 0; j < N; j++) {
                        t = j *N;
                        V[t+k] = -V[t+k];
                    }
                }
                break;
            }
        }
    }
    svd_sort(U,M,N,V,q);
    free(e);
    return ierr;
}
/* SVD entry point for wide matrices (M < N): factors A^T instead, so
 * U is M x M, V is N x M, q is length M, and A = (V * diag(q) * U')'.
 * Returns whatever svd() returns.  Aborts if M >= N (use svd directly). */
int svd_transpose(double *A, int M, int N, double *U, double *V, double *q) {
    if (M >= N) {
        printf("M>=N. Use svd routine.\n");
        exit(-1);
    }
    /* V doubles as both the transposed input and the left factor of the
     * transposed problem; svd() copies its input into its U argument
     * first, so the aliasing is safe. */
    mtranspose(A, M, N, V);
    return svd(V, N, M, V, U, q);
}
/* Numerical rank of the M x N matrix A (M >= N expected; the public
 * rank() wrapper transposes first when needed): counts singular values
 * above max(M,N) * sigma_max * macheps.  Returns -1 if the SVD fails.
 *
 * Fixes vs. the original: (a) the failure path returned without freeing
 * U, V and q (memory leak); (b) q[0] was read before the SVD status was
 * checked, i.e. from an undefined array on failure. */
static int rank_c(double *A, int M,int N) {
    int i,rnk,ret;
    double eps,tol,szmax,qmax;
    double *U,*V,*q;
    U = (double*) malloc(sizeof(double) * M*N);
    V = (double*) malloc(sizeof(double) * N*N);
    q = (double*) malloc(sizeof(double) * N);
    eps = macheps();
    rnk = 0;
    if (M < N) {
        //mtranspose(A,M,N,U);
        szmax = (double) N;
    } else {
        szmax = (double) M;
    }
    ret = svd(A,M,N,U,V,q);
    if ( ret != 0) {
        printf("Failed to Compute SVD");
        free(U);
        free(V);
        free(q);
        return -1;
    }
    qmax = q[0]; /* svd() sorts, so q[0] is the largest singular value */
    tol = qmax*szmax *eps;
    for(i = 0; i < N;++i) {
        if (q[i] > tol) {
            rnk++;
        }
    }
    free(U);
    free(V);
    free(q);
    return rnk;
}
/* Numerical rank of an arbitrary M x N matrix A: transposes first when
 * M < N so the worker always sees rows >= columns.  A is not modified.
 *
 * Fix vs. the original: the M*N-double transpose buffer was allocated
 * unconditionally even on the M >= N path where it is never used; it is
 * now allocated only when the transpose is actually taken. */
int rank(double *A, int M,int N) {
    int rnk;
    double *AT;
    if (M < N) {
        AT = (double*) malloc(sizeof(double) * M*N);
        mtranspose(A,M,N,AT);
        rnk = rank_c(AT,N,M);
        free(AT);
    } else {
        rnk = rank_c(A,M,N);
    }
    return rnk;
}
/* Randomized SVD (Halko/Martinsson/Tropp style) of a tall matrix. */
void rsvd(double *A, int M, int N,int K, int oversample, int n_iter,double *U, double *V, double *S) {
    /*
     A - MXN matrix
     K - Rank of approximation being constructed. K <= min(m,n). default value 6
     L - block size of the normalized power iterations. Default K+2
     n_iter - number of normalized power iterations to conduct. Default 2
     U - MXK
     V - NXK
     S - Diagonal Matrix KXK
     */
    if (n_iter == 0) {
        printf("Number of power iterations must be >= 1 or set it to < 0 if you want to use default value #RSVD_POWER_ITERATIONS \n");
        exit(-1);
    }
    else if (n_iter < 0) {
        n_iter = (int)RSVD_POWER_ITERATIONS;
    }
    int i, j,maxdim,L;
    double *Q1,*Q2,*AT,*R,*bvec,*uq,*vq,*sq;
    int *ipiv;
    L = K + oversample;  /* sketch width */
    maxdim = M > N ? M : N;
    /* NOTE(review): reseeding the global PRNG inside a library routine
     * makes results time-dependent and clobbers any caller seed. */
    srand(time(NULL));
    if (M >= N) {
        Q1 = (double*)malloc(sizeof(double)*maxdim*L);
        Q2 = (double*)malloc(sizeof(double)*maxdim*L);
        AT = (double*)malloc(sizeof(double)*M*N);
        ipiv = (int*)malloc(sizeof(int)*maxdim);
        R = (double*)malloc(sizeof(double)*L*L);
        bvec = (double*)malloc(sizeof(double)*L*L);
        uq = (double*)malloc(sizeof(double)*L*L);
        vq = (double*)malloc(sizeof(double)*L*N);
        sq = (double*)malloc(sizeof(double)*L);
        random_matrix(Q1, N, L);// N*L
        mtranspose(A, M, N, AT);
        /* Normalized (LU-stabilized) power iterations on the sketch. */
        for (i = 0; i < n_iter; ++i) {
            mmult(A, Q1, Q2, M, N, L);// M*L
            rludecomp(Q2, M, L, ipiv);
            getPU(Q2, M, L, ipiv, Q1, NULL);// M*L
            mmult(AT, Q1, Q2, N, M, L);//N*L
            rludecomp(Q2, N, L, ipiv);
            getPU(Q2, N, L, ipiv, Q1, NULL);// N*L
        }
        /* Orthonormal basis Q1 of the range via QR of the final sample. */
        mmult(A, Q1, Q2, M, N, L);
        qrdecomp(Q2, M, L, bvec);
        getQR(Q2, M, L, bvec, Q1, R);// M*L
        mtranspose(Q1, M, L, Q2);// L*M
        /* Project: B = Q1^T A (L x N), then SVD the small problem. */
        mmult(Q2, A, Q1, L, M, N);// L* N
        svd_transpose(Q1, L, N, uq, vq, sq);
        // uq - LXL
        // vq - NXL
        //itranspose(vq, N, L);
        /* Keep the leading K columns / values. */
        for (i = 0; i < N; ++i) {
            for (j = 0; j < K; ++j) {
                V[i*K + j] = vq[i*L + j];
            }
        }
        memcpy(S, sq, sizeof(double)*K);
        /* U = Q1 * uq, truncated to K columns. */
        itranspose(Q2, L, M);
        mmult(Q2, uq, Q1, M, L, L);
        for (i = 0; i < M; ++i) {
            for (j = 0; j < K; ++j) {
                U[i*K + j] = Q1[i*L + j];
            }
        }
        free(Q1);
        free(Q2);
        free(ipiv);
        free(AT);
        free(R);
        free(bvec);
        free(uq);
        free(vq);
        free(sq);
    }
    else {
        printf("Randomized SVD is only implemented for tall matrices (rows > columns)");
        exit(-1);
    }
}
|
GB_unop__one_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__one_int16_int16
// op(A') function: GB_unop_tran__one_int16_int16
// C type: int16_t
// A type: int16_t
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = 1 ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Auto-generated kernel (do not hand-edit the Generated/ copy): apply the
 * ONE unary operator elementwise, i.e. Cx[p] = 1 for all anz entries,
 * parallelized over nthreads.  Returns GrB_NO_VALUE when the operator or
 * type is disabled at compile time. */
GrB_Info GB_unop_apply__one_int16_int16
(
    int16_t *Cx,            // Cx and Ax may be aliased
    const int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        /* The generator emitted no-op GETA/CAST statements for this op. */
        ; ;
        ; ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* Auto-generated kernel: C = op(cast(A')) — transpose A while applying the
 * ONE unary operator.  The actual loop body lives in the shared template
 * GB_unop_transpose.c, specialized through the GB_* macros defined above
 * in this generated file. */
GrB_Info GB_unop_tran__one_int16_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bd_cilk.c | #include <stdlib.h>
#include <stdio.h>
#include <unistd.h> // access
#include <math.h>
#include <assert.h>
#include "timer.h"
#include "bd.h"
#include <omp.h>
#include <mkl.h>
#include <cilk/cilk.h>
#include <cilk/cilk_api.h>
#define NTHREADS 24
#define M_PI 3.14159265358979323846
#define my_EPS 0.000000001
/* Debug helper: print the FIRST FIVE rows of an n-column row-major matrix
 * (hard-coded 5 — presumably a deliberate truncation for large matrices;
 * assumes the matrix has at least 5 rows — TODO confirm). */
void print_matrix(double *a, int n){
    int row, col;
    for (row = 0; row < 5; row++) {
        for (col = 0; col < n; col++) {
            printf("%lf ", a[row * n + col]);
        }
        printf("\n\n");
    }
    return;
}
/* Debug helper: print all n entries of a double array on one line. */
void print_array(double *a, int n){
    int idx;
    for (idx = 0; idx < n; idx++) {
        printf("%lf ", a[idx]);
    }
    printf("\n");
    return;
}
//****************************** RPY_EWALD part *****************************************************
/* Real-space Ewald contribution to the RPY mobility tensor for a particle
 * pair at distance r with splitting parameter xi; a3 carries the radius
 * term 0.5*(a_i^2 + a_j^2) supplied by the caller.  *m11 is the isotropic
 * (identity) coefficient and *m12 the rr^T coefficient.  The unit bead
 * radius a = 1 is hard-coded. */
inline void scalar_rpy_ewald_real(double r, double xi, double a3, double *m11, double *m12)
{
    double a = 1.;
    double xi2 = xi*xi;
    double xi3 = xi2*xi;
    double xi5 = xi3*xi2;
    double xi7 = xi5*xi2;
    double r2 = r*r;
    double r4 = r2*r2;
    double ri = 1./r;
    double ri2 = ri*ri;
    double ri3 = ri*ri2;
    double erfc_xi_r = erfc(xi*r);
    /* Gaussian factor exp(-xi^2 r^2)/sqrt(pi) shared by both terms. */
    double pi_exp = 1./sqrt(M_PI) * exp(-xi2*r2);
    *m11 = (0.75*a*ri + 0.5*a3*ri3)*erfc_xi_r + ( 4*xi7*a3*r4 + 3*xi3*a*r2 - 20*xi5*a3*r2 - 4.5*xi*a + 14*xi3*a3 + xi*a3*ri2)*pi_exp;
    *m12 = (0.75*a*ri - 1.5*a3*ri3)*erfc_xi_r + (-4*xi7*a3*r4 - 3*xi3*a*r2 + 16*xi5*a3*r2 + 1.5*xi*a - 2*xi3*a3 - 3*xi*a3*ri2)*pi_exp;
}
/* Reciprocal-space Ewald coefficient of the RPY tensor for wavenumber k
 * with splitting parameter xi; the result is written to *m2.
 * Fix vs. the original: removed the unused locals `a` and `a3`. */
inline void scalar_rpy_ewald_recip(double k, double xi, double *m2)
{
    double k2 = k*k;
    double xii2k2 = k2/(xi*xi); /* (k/xi)^2 */
    *m2 = (1. + 0.25*xii2k2 + 0.125*xii2k2*xii2k2) * 6.*M_PI/k2 * exp(-0.25*xii2k2);
}
// note: positions must be wrapped inside the box [0,L]
int rpy_ewald(int np, double * restrict a, const double * restrict pos, double L, const double * restrict rad, double xi, int nr, int nk)
{
// __cilkrts_set_param("nworkers", "24");
// printf("Inside function rpy_ewald\n");
__declspec(align(64)) double rvec[8];
__declspec(align(64)) double rvec0[8];
__declspec(align(64)) double temp[8];
double a3;
double m11, m12, m2;
double eye3_coef;
double r2, r;
int x, y, z;
int i, j;
double *ap0, *ap;
int vsize = ((2*nk+1)*(2*nk+1)*(2*nk+1) - 1) / 2;
#define VSIZE ((2*6+1)*(2*6+1)*(2*6+1) - 1) / 2
// int A_VSIZE = ceil(VSIZE/8.0)*8;
// int K_VSIZE = ceil(3*VSIZE/8.0)*8;
// printf("check vsize=%d\n", A_VSIZE);
__declspec(align(64)) double k_array[VSIZE];//1104
__declspec(align(64)) double m2_array[VSIZE];//1104
__declspec(align(64)) double kvec_array[3*VSIZE];//3296
int ind;
__declspec(align(64)) double kvec[8];
double k;
double t;
double vinv = 1./(L*L*L);
double time0, time1;
double time0_real, time1_real;
double time0_recip, time1_recip;
// INDICES for converting for loops
int _b, _index, ib, ib2;
// *************************************************************************
// // compute and save coefficients for reciprocal-space sum
// // Due to symmetry, only need half of the grid points
ind = 0;
_b = (2*nk+1);
for (int _index =0 ;_index < (_b*_b*_b -1)/2; _index++){// Using indices x,y,z are recalculated
z = _index%(_b)-nk;// adjusting the indices
x = (_index-_index%(_b*_b))/(_b*_b)-nk;
y = (_index%(_b*_b)-_index%(_b))/_b-nk;
k_array[ind] = 2.*M_PI/L*sqrt((double)(x*x + y*y + z*z));
scalar_rpy_ewald_recip(k_array[ind], xi, &m2_array[ind]);
kvec_array[3*ind ] = 2.*M_PI/L*x;
kvec_array[3*ind+1] = 2.*M_PI/L*y;
kvec_array[3*ind+2] = 2.*M_PI/L*z;
ind++;
}
// #pragma omp parallel for schedule(static) num_threads(NTHREADS) private(i, j, ap, ap0, _b, temp, eye3_coef, _index, rvec0, rvec, x, y, z, r, r2, m11, m12, a3 )
cilk_for (int _index1 = np*(np-1)/2-1; _index1>=0; _index1--){
int i, j, _b, _index, x, y, z;
double *ap, *ap0, eye3_coef, r, r2, m11, m12, a3;
__declspec(align(64)) double rvec[8];
__declspec(align(64)) double rvec0[8];
__declspec(align(64)) double temp[8];
i = np-1-(int)((1+sqrt(8*_index1+1))/2);
j = np-1-_index1 + (int)((1+sqrt(8*_index1+1))/2)*((int)((1+sqrt(8*_index1+1))/2)-1)/2;
temp[0] = 0.;
temp[1] = 0.; temp[3] = 0.;
temp[2] = 0.; temp[4] = 0.; temp[5] = 0.;
eye3_coef = 0.;
rvec0[0] = pos[3*i] - pos[3*j];
rvec0[1] = pos[3*i+1] - pos[3*j+1];
rvec0[2] = pos[3*i+2] - pos[3*j+2];
a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]);
_b = (2*nr+1);
//shared(eye3_coef, temp, rvec0, L, xi, a3, m11, m12, _b, xi3, xi5, xi7, xi)
////// #pragma omp parallel for schedule(static) private(rvec, x, y, z, r, r2, m11, m12) shared(eye3_coef, temp, rvec0, a3)
for (int _index =0 ;_index < _b*_b*_b; _index++){
z =_index%(_b)-nr;// adjusting the indices
x = (_index-_index%(_b*_b))/(_b*_b)-nr;
y = (_index%(_b*_b)-_index%(_b))/_b-nr;
rvec[0] = rvec0[0] + x*L;
rvec[1] = rvec0[1] + y*L;
rvec[2] = rvec0[2] + z*L;
// compute norm
r2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
r = sqrt(r2);
rvec[0] /= r;
rvec[1] /= r;
rvec[2] /= r;
scalar_rpy_ewald_real(r, xi, a3, &m11, &m12);
eye3_coef += m11;
temp[0] += m12 * rvec[0] * rvec[0];
temp[1] += m12 * rvec[0] * rvec[1];
temp[2] += m12 * rvec[0] * rvec[2];
temp[3] += m12 * rvec[1] * rvec[1];
temp[4] += m12 * rvec[1] * rvec[2];
temp[5] += m12 * rvec[2] * rvec[2];
}
// add contribution to eye3 term
temp[0] += eye3_coef;
temp[3] += eye3_coef;
temp[5] += eye3_coef;
// sum into global matrix (only lower-triangular part)
// // Use matlab to add transpose
ap0 = &a[np*3*3*i + 3*j];
ap = ap0;
*ap++ = temp[0];
*ap++ = temp[1];
*ap = temp[2];
ap = ap0+np*3;
*ap++ = temp[1];
*ap++ = temp[3];
*ap = temp[4];
ap = ap0+np*3+np*3;
*ap++ = temp[2];
*ap++ = temp[4];
*ap = temp[5];
}
// reciprocal-space sum
// #pragma omp parallel for schedule(static) num_threads(NTHREADS) private(i, j, temp, ap, ap0, ind, rvec, kvec, k, m2, t, a3)
cilk_for (int _index = np*(np+1)/2-1; _index>=0; _index--){
int i, j, ind;
double *ap, *ap0, k, m2, t, a3;
__declspec(align(64)) double temp[8];
__declspec(align(64)) double rvec[8];
__declspec(align(64)) double kvec[8];
i = np-1-(int)((-1+sqrt(8*_index+1))/2);
j = np-1-_index + (int)((-1+sqrt(8*_index+1))/2)*((int)((-1+sqrt(8*_index+1))/2)+1)/2;
rvec[0] = pos[3*i+0] - pos[3*j];
rvec[1] = pos[3*i+1] - pos[3*j+1];
rvec[2] = pos[3*i+2] - pos[3*j+2];
temp[0] = 0.;
temp[1] = 0.; temp[3] = 0.;
temp[2] = 0.; temp[4] = 0.; temp[5] = 0.;
a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]);
for (ind=0; ind<vsize; ind++)
{
k = k_array[ind];
m2 = m2_array[ind];
kvec[0] = kvec_array[3*ind ];
kvec[1] = kvec_array[3*ind+1];
kvec[2] = kvec_array[3*ind+2];
t = 2.*vinv*m2*cos(kvec[0]*rvec[0] + kvec[1]*rvec[1] + kvec[2]*rvec[2])*(1.-a3*k*k/3.);
kvec[0] /= k;
kvec[1] /= k;
kvec[2] /= k;
temp[0] += t * (1. - kvec[0]*kvec[0]);
temp[1] += t * - kvec[0]*kvec[1];
temp[2] += t * - kvec[0]*kvec[2];
temp[3] += t * (1. - kvec[1]*kvec[1]);
temp[4] += t * - kvec[1]*kvec[2];
temp[5] += t * (1. - kvec[2]*kvec[2]);
}
// sum into matrix
// // sum with existing values
ap0 = &a[np*3*3*i + 3*j];
ap = ap0;
*ap++ += temp[0];
*ap++ += temp[1];
*ap += temp[2];
ap = ap0+np*3;
*ap++ += temp[1];
*ap++ += temp[3];// diagonal element
*ap += temp[4];
ap = ap0+np*3+np*3;
*ap++ += temp[2];
*ap++ += temp[4];
*ap += temp[5];// diagonal element
}
// self-part
for (i=0; i<np; i++)// adding some term to diagonal
{
t = 1./rad[i] - (6. - 40./3.*xi*xi*rad[i]*rad[i])*xi/sqrt(M_PI);
t *= 0.5;
for (j=0; j<3; j++)
{
ind = 3*i+j;
a[ind*np*3+ind] = a[ind*np*3+ind]*0.5+t;// taking care of (i==j) condition
}
}
return 0;
}
//**************************************************************************************************
/* Decompose a flattened 3-D index into grid coordinates (i, j, k) for a
 * cubic grid of side b, where index = i*b*b + j*b + k. Results are
 * written through the output pointers. */
void get_indices(int index, int *i, int *j, int *k, int b){
    const int slab = b * b;               /* elements per i-slab */
    const int within_slab = index % slab; /* offset inside the slab */
    *i = index / slab;
    *j = within_slab / b;
    *k = within_slab % b;
}
// One cell of the uniform spatial grid used for neighbor search.
// `head` is the index of the first particle in this cell's implied
// linked list (chained through the `next` array), or -1 if empty.
struct box
{
    int head;
};
// It is possible to use smaller boxes and more complex neighbor patterns.
// Half stencil of cell offsets: 13 of the 26 adjacent cells plus the cell
// itself. Each unordered pair of cells is therefore visited exactly once,
// which lets the force loop apply equal-and-opposite updates (p1 += / p2 -=)
// without double counting.
#define NUM_BOX_NEIGHBORS 14
int box_neighbors[NUM_BOX_NEIGHBORS][3] =
{
    {-1,-1,-1},
    {-1,-1, 0},
    {-1,-1,+1},
    {-1, 0,-1},
    {-1, 0, 0},
    {-1, 0,+1},
    {-1,+1,-1},
    {-1,+1, 0},
    {-1,+1,+1},
    { 0,-1,-1},
    { 0,-1, 0},
    { 0,-1,+1},
    { 0, 0,-1},
    { 0, 0, 0} // will calculate within the box interactions
};
/*
// CHECK RPY*************
int gold_read(const char *filename, int npos, double *gold)
{
int npos_read;
FILE *fp = fopen(filename, "r");
assert(fp);
fscanf(fp, "%d\n", &npos_read);
char label[100];
fgets(label, 100, fp);
assert(npos == npos_read);
for (int i=0; i<3*npos; i++) {
for (int j=0; j<3*npos; j++) {
fscanf(fp, "%lf\n", &gold[i*(3*npos) + j]);
}
}
fclose(fp);
return 0;
}
double compare_gold(int npos, double *a,double *gold) {
double err = 0.0;
printf("a = %lf\n", a[3]);
printf("gold = %lf\n", gold[3]);
for (int i=0; i<npos; i++) {
for (int j=0; j<npos; j++) {
double diff = a[i*(npos*3) + j] - gold[i*(npos*3) +j];
err += diff*diff;
// if(err>0){printf("error at position: i=%d j=%d and err = %lf\n", i, j, err);}
// printf("error at position: i=%d j=%d and err = %lf\n", i, j, err);
}
}
return err;
}
// **********************
*/
/*
 * bd: advance an npos-particle Brownian-dynamics simulation by
 * INTERVAL_LEN time steps inside a cubic periodic box of side L.
 *
 * Per step it:
 *   1. wraps pos_orig into [0,L)^3 (into pos) and bins particles into a
 *      uniform cell grid (implied linked lists: per-cell head + next[]);
 *   2. accumulates short-range repulsive pair forces over the half
 *      stencil of NUM_BOX_NEIGHBORS neighbor cells;
 *   3. builds the RPY/Ewald hydrodynamic mobility matrix au
 *      (real-space sum, reciprocal-space sum, self term);
 *   4. Cholesky-factors au and multiplies by a standard-normal vector
 *      (buf) to obtain correlated Brownian displacements hd_vec;
 *   5. updates the unwrapped positions pos_orig (explicit Euler).
 *
 * A per-phase timing breakdown is printed before returning 0.
 * NOTE(review): the `types` argument is currently unused.
 */
int bd(int npos, double * restrict pos_orig, double * restrict buf, const int *types, double L, double * restrict pos, int* restrict next, double* restrict forces, double f_const, double * restrict au, double * restrict rad, double xi, int nr, int nk, double * restrict hd_vec)
{
    /* Short-range interaction parameters (candidates for promotion to
     * function arguments). */
    double krepul = 100, a = 1, a_sq, phi = 0.2, f;
    a_sq = a*a;
    int boxdim;                 /* number of cells per box side */
    double cutoff2; int numpairs_p;
    cutoff2 = 4;                /* squared cutoff; requires cutoff < L/boxdim */
    boxdim = (int)(L/cutoff2)*a;
    printf("L = %lf cutoff2 = %lf boxdim = %d\n", L, cutoff2, boxdim);
    /* Cell grid as a VLA of list heads.
     * NOTE(review): large boxdim may overflow the stack. */
    struct box b[boxdim][boxdim][boxdim];
    struct box *bp;
    struct box *neigh_bp;
    int idx, idy, idz, index, box2, ib2;
    int neigh_idx, neigh_idy, neigh_idz;
    int p1, p2, j, i;
    double d2, dx, dy, dz, s;
    box2 = boxdim*boxdim;
    if (boxdim < 4 || cutoff2 > (L/boxdim)*(L/boxdim))
    {
        printf("interactions: bad input parameters\n");
        // return 1;
    }
    double t0, t_init_cells = 0, t_assign_to_cells=0, t_update_pos=0, t_force=0, t_hd = 0, t_cho = 0;
    for (int step=0; step<INTERVAL_LEN; step++)
    {
        /* ---- phase 1a: reset every cell's linked-list head ---- */
        t0 = time_in_seconds();
        for (idx=0; idx<boxdim; idx++){
            for (idy=0; idy<boxdim; idy++){
                for (idz=0; idz<boxdim; idz++){
                    b[idx][idy][idz].head=-1;
                }
            }
        }
        t_init_cells += time_in_seconds()-t0;
        t0 = time_in_seconds();
        /* ---- phase 1b: wrap positions into [0,L)^3 and bin particles ---- */
        for (i=0; i<npos; i++)
        {
            if (pos_orig[3*i] >= 0){pos[3*i]= fmod(pos_orig[3*i], L);}
            else { /* pos_orig component is negative */
                pos[3*i] = L-fmod(-1*pos_orig[3*i], L);
            }
            if (pos_orig[3*i+1] >= 0){pos[3*i+1]= fmod(pos_orig[3*i+1], L);}
            else {
                pos[3*i+1] = L-fmod(-1*pos_orig[3*i+1], L);
            }
            if (pos_orig[3*i+2] >= 0){pos[3*i+2]= fmod(pos_orig[3*i+2], L);}
            else {
                pos[3*i+2] = L-fmod(-1*pos_orig[3*i+2], L);
            }
            if (pos[3*i]<0){printf("pos_orig = %lf pos defect = %lf and i = %d and L =%lf\n", pos_orig[3*i], pos[3*i], i, L);}
            /* initialize entry of implied linked list */
            next[i] = -1;
            /* interaction forces are rebuilt from scratch every step */
            forces[3*i+0] = 0; forces[3*i+1] = 0; forces[3*i+2] = 0;
            /* cell containing this particle (positions now in [0,L)^3) */
            idx = (int)(pos[3*i ]/L*boxdim);
            idy = (int)(pos[3*i+1]/L*boxdim);
            idz = (int)(pos[3*i+2]/L*boxdim);
            /* push onto the front of the cell's implied linked list */
            bp = &b[idx][idy][idz];
            next[i] = bp->head;
            bp->head = i;
        }
        t_assign_to_cells += time_in_seconds()-t0;
        t0 = time_in_seconds();
        /* ---- phase 2: short-range repulsive forces via cell lists ---- */
        cilk_for (int index=0; index<boxdim*box2; index++){
            /* BUGFIX: f was previously declared int here, which truncated
             * the repulsive force magnitude krepul*(2a - s) to an integer
             * (the old OpenMP pragma and the function-scope declaration
             * both treat f as double). */
            int j, neigh_idx, neigh_idy, neigh_idz, p1, p2, idx, idy, idz, ib2;
            double dx, dy, dz, s, d2, f;
            struct box *neigh_bp;
            struct box *bp;
            idz = index%(boxdim);
            ib2 = index%(box2);
            idx = (index-ib2)/(box2);
            idy = (ib2-idz)/boxdim;
            bp = &b[idx][idy][idz];
            /* half stencil: each unordered cell pair is processed once */
            for (j=0; j<NUM_BOX_NEIGHBORS; j++)
            {
                neigh_idx = (idx + box_neighbors[j][0] + boxdim) % boxdim;
                neigh_idy = (idy + box_neighbors[j][1] + boxdim) % boxdim;
                neigh_idz = (idz + box_neighbors[j][2] + boxdim) % boxdim;
                neigh_bp = &b[neigh_idx][neigh_idy][neigh_idz];
                /* when using boxes, the minimum image is known beforehand,
                 * so position offsets compensate for wraparound */
                double xoffset = 0.;
                double yoffset = 0.;
                double zoffset = 0.;
                if (idx + box_neighbors[j][0] == -1) xoffset = -L;
                if (idy + box_neighbors[j][1] == -1) yoffset = -L;
                if (idz + box_neighbors[j][2] == -1) zoffset = -L;
                if (idx + box_neighbors[j][0] == boxdim) xoffset = L;
                if (idy + box_neighbors[j][1] == boxdim) yoffset = L;
                if (idz + box_neighbors[j][2] == boxdim) zoffset = L;
                p1 = neigh_bp->head;
                while (p1 != -1)
                {
                    p2 = bp->head;
                    while (p2 != -1)
                    {
                        /* distance vector (my_EPS guards the sqrt/division
                         * against the p1 == p2 self pair) */
                        dx = pos[3*p1+0] - pos[3*p2+0] + xoffset;
                        dy = pos[3*p1+1] - pos[3*p2+1] + yoffset;
                        dz = pos[3*p1+2] - pos[3*p2+2] + zoffset;
                        d2 = dx*dx+dy*dy+dz*dz+my_EPS;
                        if ( d2<4.0*a_sq)
                        {
                            /* linear spring repulsion for overlapping spheres,
                             * applied equal-and-opposite to both particles */
                            s = sqrt(d2);
                            f = krepul*(2*a-s);
                            forces[3*p1+0] += f*dx/s;
                            forces[3*p1+1] += f*dy/s;
                            forces[3*p1+2] += f*dz/s;
                            forces[3*p2+0] -= f*dx/s;
                            forces[3*p2+1] -= f*dy/s;
                            forces[3*p2+2] -= f*dz/s;
                        }
                        p2 = next[p2];
                    }
                    p1 = next[p1];
                }
            }
        }
        t_force += time_in_seconds() - t0;
        t0 = time_in_seconds();
        /* ---- phase 3: RPY/Ewald mobility matrix (inlined rpy_ewald) ----
         * au = hydrodynamic interaction matrix (upper/lower blocks filled
         * pairwise, diagonal from the self term). */
        for (int p1=0; p1<3*npos*3*npos; p1++){
            au[p1] = 0;
        }
        int np = npos;
        __declspec(align(64)) double rvec[8];
        __declspec(align(64)) double rvec0[8];
        __declspec(align(64)) double temp[8];
        double a3;
        double m11, m12, m2;
        double eye3_coef;
        double r2, r;
        int x, y, z;
        int i, j;
        double *ap0, *ap;
        int vsize = ((2*nk+1)*(2*nk+1)*(2*nk+1) - 1) / 2;
#define VSIZE ((2*6+1)*(2*6+1)*(2*6+1) - 1) / 2
        __declspec(align(64)) double k_array[VSIZE];
        __declspec(align(64)) double m2_array[VSIZE];
        __declspec(align(64)) double kvec_array[3*VSIZE];
        int ind;
        __declspec(align(64)) double kvec[8];
        double k;
        double t;
        double vinv = 1./(L*L*L);
        int _b, _index;
        /* precompute reciprocal-space coefficients; by symmetry only half
         * of the (2nk+1)^3 grid (excluding the origin) is needed */
        ind = 0;
        _b = (2*nk+1);
        for (int _index =0 ;_index < (_b*_b*_b -1)/2; _index++){
            z = _index%(_b)-nk;
            x = (_index-_index%(_b*_b))/(_b*_b)-nk;
            y = (_index%(_b*_b)-_index%(_b))/_b-nk;
            k_array[ind] = 2.*M_PI/L*sqrt((double)(x*x + y*y + z*z));
            scalar_rpy_ewald_recip(k_array[ind], xi, &m2_array[ind]);
            kvec_array[3*ind ] = 2.*M_PI/L*x;
            kvec_array[3*ind+1] = 2.*M_PI/L*y;
            kvec_array[3*ind+2] = 2.*M_PI/L*z;
            ind++;
        }
        /* real-space Ewald sum over off-diagonal pairs (i > j) */
        cilk_for (int _index1 = np*(np-1)/2-1; _index1>=0; _index1--){
            int i, j, _b, _index, x, y, z;
            double *ap, *ap0, eye3_coef, r, r2, m11, m12, a3;
            __declspec(align(64)) double rvec[8];
            __declspec(align(64)) double rvec0[8];
            __declspec(align(64)) double temp[8];
            /* map the linear pair index back to (i, j) */
            i = np-1-(int)((1+sqrt(8*_index1+1))/2);
            j = np-1-_index1 + (int)((1+sqrt(8*_index1+1))/2)*((int)((1+sqrt(8*_index1+1))/2)-1)/2;
            temp[0] = 0.;
            temp[1] = 0.; temp[3] = 0.;
            temp[2] = 0.; temp[4] = 0.; temp[5] = 0.;
            eye3_coef = 0.;
            rvec0[0] = pos[3*i] - pos[3*j];
            rvec0[1] = pos[3*i+1] - pos[3*j+1];
            rvec0[2] = pos[3*i+2] - pos[3*j+2];
            a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]);
            _b = (2*nr+1);
            /* sum over the (2nr+1)^3 periodic images */
            for (int _index =0 ;_index < _b*_b*_b; _index++){
                z =_index%(_b)-nr;
                x = (_index-_index%(_b*_b))/(_b*_b)-nr;
                y = (_index%(_b*_b)-_index%(_b))/_b-nr;
                rvec[0] = rvec0[0] + x*L;
                rvec[1] = rvec0[1] + y*L;
                rvec[2] = rvec0[2] + z*L;
                /* normalize the separation vector */
                r2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
                r = sqrt(r2);
                rvec[0] /= r;
                rvec[1] /= r;
                rvec[2] /= r;
                scalar_rpy_ewald_real(r, xi, a3, &m11, &m12);
                eye3_coef += m11;
                temp[0] += m12 * rvec[0] * rvec[0];
                temp[1] += m12 * rvec[0] * rvec[1];
                temp[2] += m12 * rvec[0] * rvec[2];
                temp[3] += m12 * rvec[1] * rvec[1];
                temp[4] += m12 * rvec[1] * rvec[2];
                temp[5] += m12 * rvec[2] * rvec[2];
            }
            /* add the isotropic (identity) contribution */
            temp[0] += eye3_coef;
            temp[3] += eye3_coef;
            temp[5] += eye3_coef;
            /* write the symmetric 3x3 pair block into au
             * (only this triangular part; the transpose is added later) */
            ap0 = &au[np*3*3*i + 3*j];
            ap = ap0;
            *ap++ = temp[0];
            *ap++ = temp[1];
            *ap = temp[2];
            ap = ap0+np*3;
            *ap++ = temp[1];
            *ap++ = temp[3];
            *ap = temp[4];
            ap = ap0+np*3+np*3;
            *ap++ = temp[2];
            *ap++ = temp[4];
            *ap = temp[5];
        }
        /* reciprocal-space Ewald sum over all pairs including i == j */
        cilk_for (int _index = np*(np+1)/2-1; _index>=0; _index--){
            int i, j, ind;
            double *ap, *ap0, k, m2, t, a3;
            __declspec(align(64)) double temp[8];
            __declspec(align(64)) double rvec[8];
            __declspec(align(64)) double kvec[8];
            i = np-1-(int)((-1+sqrt(8*_index+1))/2);
            j = np-1-_index + (int)((-1+sqrt(8*_index+1))/2)*((int)((-1+sqrt(8*_index+1))/2)+1)/2;
            rvec[0] = pos[3*i+0] - pos[3*j];
            rvec[1] = pos[3*i+1] - pos[3*j+1];
            rvec[2] = pos[3*i+2] - pos[3*j+2];
            temp[0] = 0.;
            temp[1] = 0.; temp[3] = 0.;
            temp[2] = 0.; temp[4] = 0.; temp[5] = 0.;
            a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]);
            for (int ind=0; ind<vsize; ind++)
            {
                k = k_array[ind];
                m2 = m2_array[ind];
                kvec[0] = kvec_array[3*ind ];
                kvec[1] = kvec_array[3*ind+1];
                kvec[2] = kvec_array[3*ind+2];
                t = 2.*vinv*m2*cos(kvec[0]*rvec[0] + kvec[1]*rvec[1] + kvec[2]*rvec[2])*(1.-a3*k*k/3.);
                kvec[0] /= k;
                kvec[1] /= k;
                kvec[2] /= k;
                temp[0] += t * (1. - kvec[0]*kvec[0]);
                temp[1] += t * - kvec[0]*kvec[1];
                temp[2] += t * - kvec[0]*kvec[2];
                temp[3] += t * (1. - kvec[1]*kvec[1]);
                temp[4] += t * - kvec[1]*kvec[2];
                temp[5] += t * (1. - kvec[2]*kvec[2]);
            }
            /* accumulate onto the real-space block */
            ap0 = &au[np*3*3*i + 3*j];
            ap = ap0;
            *ap++ += temp[0];
            *ap++ += temp[1];
            *ap += temp[2];
            ap = ap0+np*3;
            *ap++ += temp[1];
            *ap++ += temp[3];// diagonal element
            *ap += temp[4];
            ap = ap0+np*3+np*3;
            *ap++ += temp[2];
            *ap++ += temp[4];
            *ap += temp[5];// diagonal element
        }
        /* self-mobility term, added to the 3x3 diagonal of each particle */
        for (int i=0; i<np; i++)
        {
            t = 1./rad[i] - (6. - 40./3.*xi*xi*rad[i]*rad[i])*xi/sqrt(M_PI);
            t *= 0.5;
            for (j=0; j<3; j++)
            {
                ind = 3*i+j;
                au[ind*np*3+ind] = au[ind*np*3+ind]*0.5+t;// i == j case
            }
        }
        t_hd += time_in_seconds() - t0;
        /* ---- phase 4: Cholesky factorization of the mobility matrix ---- */
        t0 = time_in_seconds();
        LAPACKE_dpotrf(LAPACK_ROW_MAJOR, 'U', 3*npos, au, 3*npos);
        t_cho += time_in_seconds() - t0;
        t0 = time_in_seconds();
        /* correlated Brownian displacements: hd_vec = U^T * N(0,1).
         * vdRngGaussian is sequential but vectorized. */
        vdRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, stream, 3*npos, buf, 0., 1.);
        cblas_dgemm(CblasRowMajor, CblasTrans, CblasTrans, 3*npos, 1, 3*npos, 1, au, 3*npos, buf, 3*npos, 0, hd_vec, 1);
        /* ---- phase 5: explicit Euler update of the unwrapped positions ---- */
        cilk_for (int i=0; i<3*npos; i++)
        {
            pos_orig[i] += forces[i]*DELTAT+f_const*hd_vec[i];
        }
        t_update_pos += time_in_seconds() - t0;
    }
    printf("--------------------------------------------------------\n");
    printf("Time: %f for initiating the cell head \n", t_init_cells);
    printf("Time: %f for assigning particles to cells \n", t_assign_to_cells);
    printf("Time: %f for force calculations \n", t_force);
    printf("Time: %f for hydrodynamic \n", t_hd);
    printf("Time: %f for cholesky \n", t_cho);
    printf("Time: %f for pos update \n", t_update_pos);
    printf("--------------------------------------------------------\n");
    return 0;
}
|
mozilla_ng_fmt_plug.c | /*
* Cracker for Mozilla's key3.db's master password.
*
* All the real logic here is borrowed from Milen Rangelov's Hashkill project
* and from Deque's article.
*
* Thanks to Jim Fougeron for all the help!
*
* This software is Copyright (c) 2014, Sanju Kholia <sanju.kholia [at]
* gmail.com> and Dhiru Kholia <dhiru [at] openwall.com>, and it is hereby
* released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mozilla;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mozilla);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // XXX
#endif
#endif
#include <stdint.h>
#include <openssl/des.h>
#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#include "sha.h"
#define FORMAT_LABEL "Mozilla"
#define FORMAT_NAME "Mozilla key3.db"
#define FORMAT_TAG "$mozilla$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "SHA1 3DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: version-3 key3.db hashes with their known master
 * passwords, exercised by John's built-in format self test. */
static struct fmt_tests tests[] = {
	{"$mozilla$*3*20*1*5199adfab24e85e3f308bacf692115f23dcd4f8f*11*2a864886f70d010c050103*16*9debdebd4596b278de029b2b2285ce2e*20*2c4d938ccb3f7f1551262185ccee947deae3b8ae", "12345678"},
	{"$mozilla$*3*20*1*4f184f0d3c91cf52ee9190e65389b4d4c8fc66f2*11*2a864886f70d010c050103*16*590d1771368107d6be64844780707787*20*b8458c712ffcc2ff938409804cf3805e4bb7d722", "openwall"},
	{"$mozilla$*3*20*1*897f35ff10348f0d3a7739dbf0abddc62e2e64c3*11*2a864886f70d010c050103*16*1851b917997b3119f82b8841a764db62*20*197958dd5e114281f59f9026ad8b7cfe3de7196a", "password"},
	{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
/* Per-hash salt data. `pctx` caches the SHA-1 state after hashing the
 * global salt, so crypt_all() only has to append each candidate password. */
static struct custom_salt {
	SHA_CTX pctx;                   // partial SHA-1 state: SHA1(global_salt)
	int global_salt_length;
	unsigned char global_salt[20];
	int local_salt_length; // entry-salt (ES)
	unsigned char local_salt[20];   // zero-padded to 20 bytes by get_salt()
} *cur_salt;
/* Allocate the per-candidate key/length/result buffers. Under OpenMP the
 * keys-per-crypt parameters are scaled by the thread count (and OMP_SCALE)
 * so each thread has enough work per crypt_all() call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}
/* Release the buffers allocated in init(), in reverse allocation order. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}
/*
 * Validate a candidate hash line of the form
 *   $mozilla$*version*ls_len*nnLen*local_salt*oid_len*oidData
 *            *check_len*password_check*gs_len*global_salt
 * Returns 1 only if every field is present, decimal/lowercase-hex as
 * required, each hex field matches its declared length, and no length
 * exceeds the fixed 20-byte buffers. Works on a strdup'd copy because
 * strtokm() modifies its input.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *keepptr;
	int res;
	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return 0;
	keepptr=strdup(ciphertext);
	p = &keepptr[TAG_LENGTH];
	if (*p != '*')
		goto err;
	++p;
	if ((p = strtokm(p, "*")) == NULL) /* version */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res != 3) /* we only know about this particular version */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* local_salt_length */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res > 20) /* must fit the fixed 20-byte buffer */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* nnLen (we ignore nnlen) */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* local_salt */
		goto err;
	if (strlen(p) /2 != res) /* hex digits must match declared length */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* oidDatalen */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res > 20)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* oidData */
		goto err;
	if (strlen(p) / 2 != res)
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* password_check_length */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res > 20)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* password_check */
		goto err;
	if (strlen(p) / 2 != res)
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* global_salt_length */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res > 20)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* global_salt */
		goto err;
	if (strlen(p) / 2 != res)
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(keepptr);
	return 1;
err:
	MEM_FREE(keepptr);
	return 0;
}
/*
 * Parse the ciphertext into a custom_salt. Only the two salts and their
 * lengths are kept here (the password-check field becomes the "binary");
 * a partial SHA-1 over the global salt is precomputed so crypt_all()
 * only has to append each candidate password. Returns a pointer to a
 * static structure, as is conventional for JtR get_salt().
 */
static void *get_salt(char *ciphertext)
{
	int i;
	static struct custom_salt cs;
	char *p, *q;
	memset(&cs, 0, SALT_SIZE); // cs.local_salt needs to be zero padded to length 20
	p = ciphertext + TAG_LENGTH;
	q = strchr(p, '*'); // version
	p = q + 1;
	q = strchr(p, '*'); // local_salt_length
	p = q + 1;
	cs.local_salt_length = atoi(p);
	q = strchr(p, '*'); // nnLen
	p = q + 1;
	q = strchr(p, '*'); // local_salt
	p = q + 1;
	// decode hex pairs into bytes
	for (i = 0; i < cs.local_salt_length; i++)
		cs.local_salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
			atoi16[ARCH_INDEX(p[2 * i + 1])];
	q = strchr(p, '*'); // oidLen (unused)
	p = q + 1;
	q = strchr(p, '*'); // oidData (unused)
	p = q + 1;
	q = strchr(p, '*'); // password_check_length
	p = q + 1;
	// Not stored in salt. This is the binary length
	q = strchr(p, '*'); // password_check
	p = q + 1;
	// Not stored in salt, this is the binary.
	q = strchr(p, '*'); // global_salt_length
	p = q + 1;
	cs.global_salt_length = atoi(p);
	q = strchr(p, '*'); // global_salt
	p = q + 1;
	for (i = 0; i < cs.global_salt_length; i++)
		cs.global_salt[i] = atoi16[ARCH_INDEX(p[i * 2])]
			* 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	// Calculate partial sha1 data for password hashing
	SHA1_Init(&cs.pctx);
	SHA1_Update(&cs.pctx, cs.global_salt, cs.global_salt_length);
	return (void *)&cs;
}
/*
 * Extract the "binary" (the first BINARY_SIZE bytes of the encrypted
 * password-check field) from the ciphertext. Fields are skipped by
 * walking the '*' separators; valid() has already checked the layout.
 * Returns a pointer to a static, suitably-aligned buffer.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;  // forces word alignment of c[]
	} buf;
	unsigned char *out = buf.c;
	char *p, *q;
	int i;
	p = ciphertext + TAG_LENGTH;
	q = strchr(p, '*'); // version
	p = q + 1;
	q = strchr(p, '*'); // local_salt_length
	p = q + 1;
	q = strchr(p, '*'); // nnLen
	p = q + 1;
	q = strchr(p, '*'); // local_salt
	p = q + 1;
	q = strchr(p, '*'); // oidLen (unused)
	p = q + 1;
	q = strchr(p, '*'); // oidData (unused)
	p = q + 1;
	q = strchr(p, '*'); // password_check_length
	p = q + 1;
	q = strchr(p, '*'); // password_check
	p = q + 1;
	// decode hex pairs into bytes
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/* Make the given salt current for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
// Key-derivation scheme documented at
// http://www.drh-consultancy.demon.co.uk/key3.html
/*
 * For each candidate password, derive the 3DES key material and encrypt
 * the fixed "password-check" string into crypt_out[index]. Without
 * OpenMP only index 0 is processed, which matches MAX_KEYS_PER_CRYPT 1.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		SHA_CTX ctx, ctxi, ctxo;
		int i;
		union {
			unsigned char uc[64];
			uint32_t ui[64/4];
		} pad;
		unsigned char buffer[20];
		unsigned char tk[20];
		unsigned char key[40];
		DES_cblock ivec;
		DES_key_schedule ks1, ks2, ks3;
		// HP = SHA1(global-salt||password)
		// Copy already calculated partial hash data
		memcpy(&ctx, &cur_salt->pctx, sizeof(SHA_CTX));
		SHA1_Update(&ctx, saved_key[index], saved_len[index]);
		SHA1_Final(buffer, &ctx);
		// CHP = SHA1(HP||entry-salt) // entry-salt (ES) is local_salt
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, buffer, 20);
		SHA1_Update(&ctx, cur_salt->local_salt, cur_salt->local_salt_length);
		SHA1_Final(buffer, &ctx);
		// Step 0 for all hmac, store off the first half (the key is the same for all 3)
		// this will avoid having to setup the ipad/opad 2 times, and also avoids 4 SHA calls
		// reducing the hmac calls from 12 SHA limbs, down to 8 and ipad/opad loads from 3
		// down to 1. It adds 4 CTX memcpy's, but that is a very fair trade off.
		SHA1_Init(&ctxi);
		SHA1_Init(&ctxo);
		memset(pad.uc, 0x36, 64);       // ipad
		for (i = 0; i < 20; ++i)
			pad.uc[i] ^= buffer[i];
		SHA1_Update(&ctxi, pad.uc, 64);
		for (i = 0; i < 64/4; ++i)      // flip ipad into opad in one pass
			pad.ui[i] ^= 0x36363636^0x5c5c5c5c;
		SHA1_Update(&ctxo, pad.uc, 64);
		// k1 = HMAC(PES||ES) // use CHP as the key, PES is ES which is zero padded to length 20
		// NOTE, memcpy ctxi/ctxo to harvest off the preloaded hmac key
		memcpy(&ctx, &ctxi, sizeof(ctx));
		SHA1_Update(&ctx, cur_salt->local_salt, 20);
		SHA1_Update(&ctx, cur_salt->local_salt, cur_salt->local_salt_length);
		SHA1_Final(buffer, &ctx);
		memcpy(&ctx, &ctxo, sizeof(ctx));
		SHA1_Update(&ctx, buffer, 20);
		SHA1_Final(key, &ctx);
		// tk = HMAC(PES) // use CHP as the key
		// NOTE, memcpy ctxi/ctxo to harvest off the preloaded hmac key
		memcpy(&ctx, &ctxi, sizeof(ctx));
		SHA1_Update(&ctx, cur_salt->local_salt, 20);
		SHA1_Final(buffer, &ctx);
		memcpy(&ctx, &ctxo, sizeof(ctx));
		SHA1_Update(&ctx, buffer, 20);
		SHA1_Final(tk, &ctx);
		// k2 = HMAC(tk||ES) // use CHP as the key
		// NOTE, ctxi and ctxo are no longer needed after this hmac, so we simply use them
		SHA1_Update(&ctxi, tk, 20);
		SHA1_Update(&ctxi, cur_salt->local_salt, cur_salt->local_salt_length);
		SHA1_Final(buffer, &ctxi);
		SHA1_Update(&ctxo, buffer, 20);
		SHA1_Final(key+20, &ctxo);
		// k = k1||k2 // encrypt "password-check" string using this key
		DES_set_key((DES_cblock *) key, &ks1);
		DES_set_key((DES_cblock *) (key+8), &ks2);
		DES_set_key((DES_cblock *) (key+16), &ks3);
		memcpy(ivec, key + 32, 8); // last 8 bytes!
		// PKCS#5 padding (standard block padding)
		DES_ede3_cbc_encrypt((unsigned char*)"password-check\x02\x02", (unsigned char*)crypt_out[index], 16, &ks1, &ks2, &ks3, &ivec, DES_ENCRYPT);
	}
	return count;
}
/* Fast pre-filter: does any computed result match the binary's first
 * 32 bits? Without OpenMP only index 0 exists (count is 1 then). */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
	if (((uint32_t*)binary)[0] == crypt_out[index][0])
		return 1;
	return 0;
}
/* Full BINARY_SIZE comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/*
 * Nothing beyond cmp_one()'s full-binary comparison is needed for this
 * format, so the exact check unconditionally reports a match.
 */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate password (NUL-safe, length-capped copy) and record
 * its length for crypt_all(). */
static void mozilla_set_key(char *key, int index)
{
	saved_len[index] = strnzcpyn(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored candidate password for the given index. */
static char *get_key(int index)
{
	return saved_key[index];
}
struct fmt_main fmt_mozilla = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
BINARY_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
mozilla_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
apply_constant_vectorvalue_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_APPLY_CONSTANT_VECTORVALUE_PROCESS_H_INCLUDED )
#define KRATOS_APPLY_CONSTANT_VECTORVALUE_PROCESS_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/kratos_flags.h"
#include "includes/kratos_parameters.h"
#include "processes/process.h"
namespace Kratos
{
///@name Kratos Classes
///@{
/// The base class for all processes in Kratos.
/** This process applies a constant vector value (and, optionally, fixity)
 * to the components of a nodal variable on all of the nodes of a given mesh.
 * TODO: still segfaults if the mesh to which it is applied is not existing
 */
class ApplyConstantVectorValueProcess : public Process
{
public:
    ///@name Type Definitions
    ///@{

    // Flags recording whether each component of the variable is to be fixed.
    KRATOS_DEFINE_LOCAL_FLAG(X_COMPONENT_FIXED);
    KRATOS_DEFINE_LOCAL_FLAG(Y_COMPONENT_FIXED);
    KRATOS_DEFINE_LOCAL_FLAG(Z_COMPONENT_FIXED);

    /// Pointer definition of ApplyConstantVectorValueProcess
    KRATOS_CLASS_POINTER_DEFINITION(ApplyConstantVectorValueProcess);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor from a Parameters object.
    /** @param model_part model part on whose nodes the value is applied
     *  @param parameters settings; see default_parameters below.
     *         "direction", "modulus", "variable_name" and "model_part_name"
     *         are mandatory and are explicitly checked before validation so
     *         that a meaningful error is thrown if they are missing.
     */
    ApplyConstantVectorValueProcess(ModelPart& model_part,
                                    Parameters parameters
                                   ) : Process(Flags()), mr_model_part(model_part)
    {
        KRATOS_TRY

        Parameters default_parameters( R"(
            {
                "model_part_name":"PLEASE_CHOOSE_MODEL_PART_NAME",
                "mesh_id": 0,
                "variable_name": "PLEASE_PRESCRIBE_VARIABLE_NAME",
                "is_fixed_x": false,
                "is_fixed_y": false,
                "is_fixed_z": false,
                "modulus" : 1.0,
                "direction": [1.0, 0.0, 0.0]
            } )" );

        // Some values need to be mandatorily prescribed since no meaningful default value exist. For this reason try accessing to them
        // So that an error is thrown if they don't exist
        if(parameters["direction"].IsArray() == true && parameters["direction"].size() != 3)
        {
            KRATOS_THROW_ERROR(std::runtime_error,"direction vector is not a vector or it does not have size 3. Direction vector currently passed",parameters.PrettyPrintJsonString());
        }
        if(parameters["modulus"].IsNumber() == false)
        {
            KRATOS_THROW_ERROR(std::runtime_error,"modulus shall be a number. Parameter list in which is included is :", parameters.PrettyPrintJsonString());
        }
        if(parameters["variable_name"].IsString() == false)
        {
            // Bug fix: error message previously read "vairbale_name".
            KRATOS_THROW_ERROR(std::runtime_error,"variable_name shall be a String. Parameter list in which is included is :", parameters.PrettyPrintJsonString());
        }
        if(parameters["model_part_name"].IsString() == false)
        {
            KRATOS_THROW_ERROR(std::runtime_error,"model_part_name shall be a String. Parameter list in which is included is :", parameters.PrettyPrintJsonString());
        }

        // Now validate against defaults -- this also ensures no type mismatch
        parameters.ValidateAndAssignDefaults(default_parameters);

        // Read from the parameters and assign to the values
        mmesh_id = parameters["mesh_id"].GetInt();

        this->Set(X_COMPONENT_FIXED, parameters["is_fixed_x"].GetBool());
        this->Set(Y_COMPONENT_FIXED, parameters["is_fixed_y"].GetBool());
        this->Set(Z_COMPONENT_FIXED, parameters["is_fixed_z"].GetBool());

        // Get the modulus and variable name
        mvariable_name = parameters["variable_name"].GetString();
        mmodulus = parameters["modulus"].GetDouble();

        mdirection.resize(3,false);
        mdirection[0] = parameters["direction"][0].GetDouble();
        mdirection[1] = parameters["direction"][1].GetDouble();
        mdirection[2] = parameters["direction"][2].GetDouble();

        const double dim_norm = norm_2(mdirection);
        if(dim_norm < 1e-20)
        {
            KRATOS_THROW_ERROR(std::runtime_error," Norm of direction given is approximately zero. Please give a direction vector with a non zero norm : current value of direction vector = ",mdirection);
        }

        // Normalize the direction
        mdirection /= dim_norm;

        if(KratosComponents< Variable<array_1d<double,3> > >::Has(mvariable_name) == false)
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Not defined the variable ",mvariable_name);
        }
        const Variable<array_1d<double,3> >& rVariable = KratosComponents< Variable<array_1d<double,3> > >::Get(mvariable_name);

        if(mmesh_id >= model_part.NumberOfMeshes())
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Mesh does not exist in model_part: mesh id is --> ",mmesh_id);
        }
        if( model_part.GetNodalSolutionStepVariablesList().Has(rVariable) == false )
        {
            std::string err_msg = std::string("Trying to fix a variable that is not in the model_part - variable: ")+mvariable_name;
            KRATOS_THROW_ERROR(std::runtime_error,err_msg,mvariable_name);
        }
        if(mdirection.size() != 3)
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Direction vector is expected to have size 3. Direction vector currently passed",mdirection);
        }

        // The per-component variables VAR_X/_Y/_Z must also be registered,
        // since Execute() fixes/assigns the components individually.
        typedef Variable<double> component_type;
        if(KratosComponents< component_type >::Has(mvariable_name+std::string("_X")) == false)
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Not defined the variable ",mvariable_name+std::string("_X"));
        }
        if(KratosComponents< component_type >::Has(mvariable_name+std::string("_Y")) == false)
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Not defined the variable ",mvariable_name+std::string("_Y"));
        }
        if(KratosComponents< component_type >::Has(mvariable_name+std::string("_Z")) == false)
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Not defined the variable ",mvariable_name+std::string("_Z"));
        }

        KRATOS_CATCH("");
    }

    /// Constructor from explicit values.
    /** @param model_part model part on whose nodes the value is applied
     *  @param rVariable  the (registered) vector variable to prescribe
     *  @param modulus    magnitude of the applied vector
     *  @param direction  direction of the applied vector (size 3); note it is
     *                    NOT re-normalized here, unlike in the Parameters ctor
     *  @param mesh_id    id of the mesh within the model part
     *  @param options    must define all of X/Y/Z_COMPONENT_FIXED
     */
    ApplyConstantVectorValueProcess(ModelPart& model_part,
                                    const Variable< array_1d<double, 3 > >& rVariable,
                                    const double modulus,
                                    const Vector direction,
                                    std::size_t mesh_id,
                                    Flags options
                                   ) : Process(options) , mr_model_part(model_part), mmodulus(modulus),mdirection(direction),mmesh_id(mesh_id)
    {
        KRATOS_TRY;

        if(mesh_id >= model_part.NumberOfMeshes())
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Mesh does not exist in model_part: mesh id is --> ",mesh_id);
        }

        // Each fixity flag must be explicitly defined by the caller.
        if(this->IsDefined(X_COMPONENT_FIXED) == false )
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Please specify if component x is to be fixed or not (flag X_COMPONENT_FIXED)","");
        }
        if(this->IsDefined(Y_COMPONENT_FIXED) == false )
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Please specify if component y is to be fixed or not (flag Y_COMPONENT_FIXED)","");
        }
        if(this->IsDefined(Z_COMPONENT_FIXED) == false )
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Please specify if the variable is to be fixed or not (flag Z_COMPONENT_FIXED)","");
        }

        mvariable_name = rVariable.Name();

        if( model_part.GetNodalSolutionStepVariablesList().Has(rVariable) == false )
        {
            std::string err_msg = std::string("Trying to fix a variable that is not in the model_part - variable: ")+mvariable_name;
            KRATOS_THROW_ERROR(std::runtime_error,err_msg,mvariable_name);
        }
        if(direction.size() != 3)
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Direction vector is expected to have size 3. Direction vector currently passed",mdirection);
        }

        typedef Variable<double> component_type;
        if(KratosComponents< component_type >::Has(mvariable_name+std::string("_X")) == false)
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Not defined the variable ",mvariable_name+std::string("_X"));
        }
        if(KratosComponents< component_type >::Has(mvariable_name+std::string("_Y")) == false)
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Not defined the variable ",mvariable_name+std::string("_Y"));
        }
        if(KratosComponents< component_type >::Has(mvariable_name+std::string("_Z")) == false)
        {
            KRATOS_THROW_ERROR(std::runtime_error,"Not defined the variable ",mvariable_name+std::string("_Z"));
        }

        KRATOS_CATCH("");
    }

    /// Destructor.
    ~ApplyConstantVectorValueProcess() override {}

    ///@}
    ///@name Operators
    ///@{

    /// This operator is provided to call the process as a function and simply calls the Execute method.
    void operator()()
    {
        Execute();
    }

    ///@}
    ///@name Operations
    ///@{

    /// Execute method is used to execute the ApplyConstantVectorValueProcess algorithms.
    void Execute() override {}

    /// this function is designed for being called at the beginning of the computations
    /// right after reading the model and the groups
    void ExecuteInitialize() override
    {
        // Compute the value to be applied and assign (and possibly fix)
        // each component on every node of the selected mesh.
        array_1d<double,3> value = mmodulus*mdirection;

        typedef Variable<double> component_type;
        const component_type& varx = KratosComponents< component_type >::Get(mvariable_name+std::string("_X"));
        const component_type& vary = KratosComponents< component_type >::Get(mvariable_name+std::string("_Y"));
        const component_type& varz = KratosComponents< component_type >::Get(mvariable_name+std::string("_Z"));

        InternalApplyValue<component_type >(varx, this->Is(X_COMPONENT_FIXED), value[0]);
        InternalApplyValue<component_type >(vary, this->Is(Y_COMPONENT_FIXED), value[1]);
        InternalApplyValue<component_type >(varz, this->Is(Z_COMPONENT_FIXED), value[2]);
    }

    /// this function is designed for being execute once before the solution loop but after all of the
    /// solvers where built
    void ExecuteBeforeSolutionLoop() override
    {
    }

    /// this function will be executed at every time step BEFORE performing the solve phase
    void ExecuteInitializeSolutionStep() override
    {
    }

    /// this function will be executed at every time step AFTER performing the solve phase
    void ExecuteFinalizeSolutionStep() override
    {
    }

    /// this function will be executed at every time step BEFORE writing the output
    void ExecuteBeforeOutputStep() override
    {
    }

    /// this function will be executed at every time step AFTER writing the output
    void ExecuteAfterOutputStep() override
    {
    }

    /// this function is designed for being called at the end of the computations
    void ExecuteFinalize() override
    {
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ApplyConstantVectorValueProcess";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << "ApplyConstantVectorValueProcess";
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}
protected:
    ModelPart& mr_model_part;    // model part holding the target mesh
    std::string mvariable_name;  // name of the vector variable being prescribed
    double mmodulus;             // magnitude of the applied vector
    Vector mdirection;           // direction of the applied vector (size 3)
    std::size_t mmesh_id;        // id of the mesh within mr_model_part

private:
    ///@name Private Operations
    ///@{

    /// Assigns VALUE to rVar on every node of the mesh, fixing the dof first
    /// when TO_BE_FIXED is true. Throws if fixing is requested but the dof
    /// was not allocated (checked on the first node only).
    template< class TVarType >
    void InternalApplyValue(const TVarType& rVar, const bool to_be_fixed, const double value)
    {
        const int nnodes = mr_model_part.GetMesh(mmesh_id).Nodes().size();

        if(nnodes != 0)
        {
            ModelPart::NodesContainerType::iterator it_begin = mr_model_part.GetMesh(mmesh_id).NodesBegin();

            //check if the dofs are there (on the first node)
            if(to_be_fixed && (it_begin->HasDofFor(rVar) == false) )
            {
                KRATOS_THROW_ERROR(std::runtime_error, " Trying to fix a dofs which was not allocated. Variable is --> ",rVar.Name() );
            }

            #pragma omp parallel for
            for(int i = 0; i<nnodes; i++)
            {
                ModelPart::NodesContainerType::iterator it = it_begin + i;

                if(to_be_fixed)
                {
                    it->Fix(rVar);
                }

                it->FastGetSolutionStepValue(rVar) = value;
            }
        }
    }

    ///@}
    ///@name Un accessible methods
    ///@{

    /// Assignment operator.
    ApplyConstantVectorValueProcess& operator=(ApplyConstantVectorValueProcess const& rOther);

    /// Copy constructor.
    //ApplyConstantVectorValueProcess(ApplyConstantVectorValueProcess const& rOther);

    ///@}
}; // Class ApplyConstantVectorValueProcess
// Definitions (with bit positions) of the component-fixity flags declared
// inside ApplyConstantVectorValueProcess.
KRATOS_CREATE_LOCAL_FLAG(ApplyConstantVectorValueProcess,X_COMPONENT_FIXED, 0);
KRATOS_CREATE_LOCAL_FLAG(ApplyConstantVectorValueProcess,Y_COMPONENT_FIXED, 1);
KRATOS_CREATE_LOCAL_FLAG(ApplyConstantVectorValueProcess,Z_COMPONENT_FIXED, 2);
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
// NOTE(review): declared but never defined in this view; presumably kept
// only for symmetry with the output stream operator -- confirm before use.
inline std::istream& operator >> (std::istream& rIStream,
                                  ApplyConstantVectorValueProcess& rThis);
/// output stream function
// Prints the process info line followed by its data via the virtual
// PrintInfo/PrintData interface.
inline std::ostream& operator << (std::ostream& rOStream,
                                  const ApplyConstantVectorValueProcess& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);
    return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_APPLY_CONSTANT_VECTORVALUE_PROCESS_H_INCLUDED defined
|
zgetrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
 *
 * Computes an LU factorization with partial pivoting of a general m-by-n
 * matrix stored in LAPACK (column-major) layout, by translating it to tile
 * layout and running the asynchronous tile routine.
 *
 * @param[in]     m    number of rows of the matrix A (m >= 0)
 * @param[in]     n    number of columns of the matrix A (n >= 0)
 * @param[in,out] pA   the matrix A; on exit, the L and U factors
 * @param[in]     lda  leading dimension of A (lda >= max(1,m))
 * @param[out]    ipiv pivot indices
 *
 * @retval PlasmaSuccess on success, a negative value indicating the illegal
 *         argument, or an error code propagated from the tile machinery.
 *
 ******************************************************************************/
int plasma_zgetrf(int m, int n,
                  plasma_complex64_t *pA, int lda, int *ipiv)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_getrf(plasma, PlasmaComplexDouble, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Initialize sequence.
    // Bug fix: the return values of the sequence/request initializers were
    // previously assigned but never checked, leaking the tile descriptor
    // on failure.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);

        // Call the tile async function.
        plasma_omp_zgetrf(A, ipiv, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request);
    }

    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
 *
 * Asynchronous tile interface for LU factorization with partial pivoting.
 * Errors are reported through SEQUENCE and REQUEST.
 *
 * @param[in,out] A        descriptor of the tile matrix to factor
 * @param[out]    ipiv     pivot indices
 * @param[in,out] sequence sequence the operation belongs to (must not be NULL)
 * @param[in,out] request  request identifying this operation (must not be NULL)
 *
 ******************************************************************************/
void plasma_omp_zgetrf(plasma_desc_t A, int *ipiv,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        if (sequence != NULL && request != NULL)
            plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Bug fix: validate sequence and request *before* they are used to
    // report failures.  The original code passed a possibly-NULL sequence
    // to plasma_request_fail() in the descriptor check, and called
    // plasma_request_fail() on the very pointer it had just found NULL.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0 || A.n == 0)
        return;

    // Call the parallel function.
    plasma_pzgetrf(A, ipiv, sequence, request);
}
|
ast-dump-openmp-ordered.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one() {                // stand-alone 'ordered' region with an empty body
#pragma omp ordered
  ;
}
void test_two(int x) {           // parameterless 'ordered' clause on a worksharing loop
#pragma omp for ordered
  for (int i = 0; i < x; i++)
    ;
}
void test_three(int x) {         // doacross loop: 'ordered(1)' plus depend(source) directive
#pragma omp for ordered(1)
  for (int i = 0; i < x; i++) {
#pragma omp ordered depend(source)
  }
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-ordered.c:3:1, line:6:1> line:3:6 test_one 'void ()'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:17, line:6:1>
// CHECK-NEXT: | `-OMPOrderedDirective {{.*}} <line:4:1, col:20>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3> openmp_structured_block
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-ordered.c:4:1) *const restrict'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:8:1, line:12:1> line:8:6 test_two 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:12:1>
// CHECK-NEXT: | `-OMPForDirective {{.*}} <line:9:1, col:24>
// CHECK-NEXT: | |-OMPOrderedClause {{.*}} <col:17, col:24>
// CHECK-NEXT: | | `-<<<NULL>>>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:10:3, line:11:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:10:3, line:11:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:10:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:11:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:9:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-ordered.c:9:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:10:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:14:1, line:19:1> line:14:6 test_three 'void (int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:24, line:19:1>
// CHECK-NEXT: `-OMPForDirective {{.*}} <line:15:1, col:27>
// CHECK-NEXT: |-OMPOrderedClause {{.*}} <col:17, col:26>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:25> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:25> 'int' 1
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:16:3, line:18:3>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:16:3, line:18:3>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:16:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-CompoundStmt {{.*}} <col:31, line:18:3> openmp_structured_block
// CHECK-NEXT: | | `-OMPOrderedDirective {{.*}} <line:17:1, col:35> openmp_standalone_directive
// CHECK-NEXT: | | |-OMPDependClause {{.*}} <col:21, <invalid sloc>>
// CHECK-NEXT: | | `-<<<NULL>>>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:15:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-ordered.c:15:1) *const restrict'
// CHECK-NEXT: | `-VarDecl {{.*}} <line:16:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
|
tree-pretty-print.c | /* Pretty formatting of GENERIC trees in C syntax.
Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006
Free Software Foundation, Inc.
Adapted from c-pretty-print.c by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "diagnostic.h"
#include "real.h"
#include "hashtab.h"
#include "tree-flow.h"
#include "langhooks.h"
#include "tree-iterator.h"
#include "tree-chrec.h"
#include "tree-pass.h"
/* Local functions, macros and variables. */
static int op_prio (tree);
static const char *op_symbol_1 (enum tree_code);
static const char *op_symbol (tree);
static void pretty_print_string (pretty_printer *, const char*);
static void print_call_name (pretty_printer *, tree);
static void newline_and_indent (pretty_printer *, int);
static void maybe_init_pretty_print (FILE *);
static void print_declaration (pretty_printer *, tree, int, int);
static void print_struct_decl (pretty_printer *, tree, int, int);
static void do_niy (pretty_printer *, tree);
static void dump_vops (pretty_printer *, tree, int, int);
static void dump_generic_bb_buff (pretty_printer *, basic_block, int, int);
#define INDENT(SPACE) do { \
int i; for (i = 0; i<SPACE; i++) pp_space (buffer); } while (0)
#define NIY do_niy(buffer,node)
#define PRINT_FUNCTION_NAME(NODE) pp_printf \
(buffer, "%s", TREE_CODE (NODE) == NOP_EXPR ? \
lang_hooks.decl_printable_name (TREE_OPERAND (NODE, 0), 1) : \
lang_hooks.decl_printable_name (NODE, 1))
static pretty_printer buffer;
static int initialized = 0;
/* Fallback printer for a tree code we do not know how to render: emit a
   "<<< Unknown tree: NAME >>>" marker and, when NODE is an expression,
   dump each of its operands indented on its own line.  */

static void
do_niy (pretty_printer *buffer, tree node)
{
  pp_string (buffer, "<<< Unknown tree: ");
  pp_string (buffer, tree_code_name[(int) TREE_CODE (node)]);

  if (EXPR_P (node))
    {
      int op;
      int num_ops = TREE_CODE_LENGTH (TREE_CODE (node));
      for (op = 0; op < num_ops; op++)
	{
	  newline_and_indent (buffer, 2);
	  dump_generic_node (buffer, TREE_OPERAND (node, op), 2, 0, false);
	}
    }

  pp_string (buffer, " >>>\n");
}
/* Dump expression T to stderr with virtual operands and DECL_UIDs,
   followed by a newline.  */

void
debug_generic_expr (tree t)
{
  print_generic_expr (stderr, t, TDF_VOPS|TDF_UID);
  fputc ('\n', stderr);
}
/* Dump statement T (and its successors) to stderr with virtual operands
   and DECL_UIDs, followed by a newline.  */

void
debug_generic_stmt (tree t)
{
  print_generic_stmt (stderr, t, TDF_VOPS|TDF_UID);
  fputc ('\n', stderr);
}
/* Dump every tree on the TREE_CHAIN starting at T to stderr, separated
   by spaces and terminated by a newline.  */

void
debug_tree_chain (tree t)
{
  for (; t; t = TREE_CHAIN (t))
    {
      print_generic_expr (stderr, t, TDF_VOPS|TDF_UID);
      fprintf (stderr, " ");
    }
  fprintf (stderr, "\n");
}
/* Prints declaration DECL to the FILE with details specified by FLAGS
   (see TDF_* in tree.h), indented by two spaces.  The pretty-printer
   buffer is written to FILE but not flushed here.  */
void
print_generic_decl (FILE *file, tree decl, int flags)
{
  maybe_init_pretty_print (file);
  print_declaration (&buffer, decl, 2, flags);
  pp_write_text_to_stream (&buffer);
}
/* Print tree T, and its successors, on file FILE.  FLAGS specifies details
   to show in the dump.  See TDF_* in tree.h.  The node is printed as a
   statement (trailing ';' where appropriate) and the output is flushed.  */
void
print_generic_stmt (FILE *file, tree t, int flags)
{
  maybe_init_pretty_print (file);
  dump_generic_node (&buffer, t, 0, flags, true);
  pp_flush (&buffer);
}
/* Print tree T, and its successors, on file FILE.  FLAGS specifies details
   to show in the dump.  See TDF_* in tree.h.  The output is indented by
   INDENT spaces.  */

void
print_generic_stmt_indented (FILE *file, tree t, int flags, int indent)
{
  int remaining;

  maybe_init_pretty_print (file);

  for (remaining = indent; remaining > 0; remaining--)
    pp_space (&buffer);

  dump_generic_node (&buffer, t, indent, flags, true);
  pp_flush (&buffer);
}
/* Print a single expression T on file FILE.  FLAGS specifies details to show
   in the dump.  See TDF_* in tree.h.  Unlike print_generic_stmt, no
   terminating ';' is added and the buffer is not flushed.  */
void
print_generic_expr (FILE *file, tree t, int flags)
{
  maybe_init_pretty_print (file);
  dump_generic_node (&buffer, t, 0, flags, false);
}
/* Dump the name of _DECL node NODE.  If the decl is anonymous, or if
   TDF_UID is set in FLAGS, also emit a unique id: "L.N" for labels with
   a valid LABEL_DECL_UID, "C.N" for CONST_DECLs and "D.N" otherwise.  */

static void
dump_decl_name (pretty_printer *buffer, tree node, int flags)
{
  tree t = node;

  if (DECL_NAME (t))
    pp_tree_identifier (buffer, DECL_NAME (t));

  if (DECL_NAME (t) == NULL_TREE || (flags & TDF_UID))
    {
      if (TREE_CODE (t) == LABEL_DECL && LABEL_DECL_UID (t) != -1)
	pp_printf (buffer, "L." HOST_WIDE_INT_PRINT_DEC,
		   LABEL_DECL_UID (t));
      else
	{
	  char prefix = (TREE_CODE (t) == CONST_DECL ? 'C' : 'D');
	  pp_printf (buffer, "%c.%u", prefix, DECL_UID (t));
	}
    }
}
/* Like the above, but used for pretty printing function calls.  Prefers
   the language hook's printable name (via PRINT_FUNCTION_NAME); falls back
   to dump_decl_name for anonymous decls.  */
static void
dump_function_name (pretty_printer *buffer, tree node)
{
  if (DECL_NAME (node))
    PRINT_FUNCTION_NAME (node);
  else
    dump_decl_name (buffer, node, 0);
}
/* Dump a function declaration's parameter list as "(T1, T2, ...)".
   NODE is the FUNCTION_TYPE.  BUFFER, SPC and FLAGS are as in
   dump_generic_node.  */
static void
dump_function_declaration (pretty_printer *buffer, tree node,
			   int spc, int flags)
{
  bool wrote_arg = false;
  tree arg;
  pp_space (buffer);
  pp_character (buffer, '(');
  /* Print the argument types.  The last element in the list is a VOID_TYPE.
     The following avoids printing the last element.  */
  arg = TYPE_ARG_TYPES (node);
  while (arg && TREE_CHAIN (arg) && arg != error_mark_node)
    {
      wrote_arg = true;
      dump_generic_node (buffer, TREE_VALUE (arg), spc, flags, false);
      arg = TREE_CHAIN (arg);
      /* Emit a separator only when the advanced ARG is followed by yet
	 another TREE_LIST node, i.e. ARG itself is a real argument that
	 will be printed, not the trailing VOID_TYPE terminator.  */
      if (TREE_CHAIN (arg) && TREE_CODE (TREE_CHAIN (arg)) == TREE_LIST)
	{
	  pp_character (buffer, ',');
	  pp_space (buffer);
	}
    }
  /* Nothing printed means an unprototyped or (void) list: show "void".  */
  if (!wrote_arg)
    pp_string (buffer, "void");
  pp_character (buffer, ')');
}
/* Dump the domain DOMAIN of an array: "[SIZE]" for a constant zero-based
   range, "[MIN:MAX]" otherwise, or "[<unknown>]" when there is no domain
   information.  */

static void
dump_array_domain (pretty_printer *buffer, tree domain, int spc, int flags)
{
  pp_character (buffer, '[');

  if (!domain)
    pp_string (buffer, "<unknown>");
  else
    {
      tree min = TYPE_MIN_VALUE (domain);
      tree max = TYPE_MAX_VALUE (domain);

      if (min && max && integer_zerop (min) && host_integerp (max, 0))
	/* Zero-based with a host-representable upper bound: print the
	   element count.  */
	pp_wide_integer (buffer, TREE_INT_CST_LOW (max) + 1);
      else
	{
	  if (min)
	    dump_generic_node (buffer, min, spc, flags, false);
	  pp_character (buffer, ':');
	  if (max)
	    dump_generic_node (buffer, max, spc, flags, false);
	}
    }

  pp_character (buffer, ']');
}
/* Dump OpenMP clause CLAUSE.  BUFFER, CLAUSE, SPC and FLAGS are as in
   dump_generic_node.  */
static void
dump_omp_clause (pretty_printer *buffer, tree clause, int spc, int flags)
{
  const char *name;
  switch (OMP_CLAUSE_CODE (clause))
    {
    /* The data-sharing clauses below all print as "name(decl)"; they set
       NAME and funnel through the shared print_remap code.  */
    case OMP_CLAUSE_PRIVATE:
      name = "private";
      goto print_remap;
    case OMP_CLAUSE_SHARED:
      name = "shared";
      goto print_remap;
    case OMP_CLAUSE_FIRSTPRIVATE:
      name = "firstprivate";
      goto print_remap;
    case OMP_CLAUSE_LASTPRIVATE:
      name = "lastprivate";
      goto print_remap;
    case OMP_CLAUSE_COPYIN:
      name = "copyin";
      goto print_remap;
    case OMP_CLAUSE_COPYPRIVATE:
      name = "copyprivate";
      goto print_remap;
  print_remap:
      pp_string (buffer, name);
      pp_character (buffer, '(');
      dump_generic_node (buffer, OMP_CLAUSE_DECL (clause),
			 spc, flags, false);
      pp_character (buffer, ')');
      break;
    case OMP_CLAUSE_REDUCTION:
      /* Prints as "reduction(<op>:decl)".  */
      pp_string (buffer, "reduction(");
      pp_string (buffer, op_symbol_1 (OMP_CLAUSE_REDUCTION_CODE (clause)));
      pp_character (buffer, ':');
      dump_generic_node (buffer, OMP_CLAUSE_DECL (clause),
			 spc, flags, false);
      pp_character (buffer, ')');
      break;
    case OMP_CLAUSE_IF:
      pp_string (buffer, "if(");
      dump_generic_node (buffer, OMP_CLAUSE_IF_EXPR (clause),
			 spc, flags, false);
      pp_character (buffer, ')');
      break;
    case OMP_CLAUSE_NUM_THREADS:
      pp_string (buffer, "num_threads(");
      dump_generic_node (buffer, OMP_CLAUSE_NUM_THREADS_EXPR (clause),
			 spc, flags, false);
      pp_character (buffer, ')');
      break;
    /* Argumentless clauses.  */
    case OMP_CLAUSE_NOWAIT:
      pp_string (buffer, "nowait");
      break;
    case OMP_CLAUSE_ORDERED:
      pp_string (buffer, "ordered");
      break;
    case OMP_CLAUSE_DEFAULT:
      pp_string (buffer, "default(");
      switch (OMP_CLAUSE_DEFAULT_KIND (clause))
	{
	case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
	  break;
	case OMP_CLAUSE_DEFAULT_SHARED:
	  pp_string (buffer, "shared");
	  break;
	case OMP_CLAUSE_DEFAULT_NONE:
	  pp_string (buffer, "none");
	  break;
	case OMP_CLAUSE_DEFAULT_PRIVATE:
	  pp_string (buffer, "private");
	  break;
	default:
	  gcc_unreachable ();
	}
      pp_character (buffer, ')');
      break;
    case OMP_CLAUSE_SCHEDULE:
      pp_string (buffer, "schedule(");
      switch (OMP_CLAUSE_SCHEDULE_KIND (clause))
	{
	case OMP_CLAUSE_SCHEDULE_STATIC:
	  pp_string (buffer, "static");
	  break;
	case OMP_CLAUSE_SCHEDULE_DYNAMIC:
	  pp_string (buffer, "dynamic");
	  break;
	case OMP_CLAUSE_SCHEDULE_GUIDED:
	  pp_string (buffer, "guided");
	  break;
	case OMP_CLAUSE_SCHEDULE_RUNTIME:
	  pp_string (buffer, "runtime");
	  break;
	default:
	  gcc_unreachable ();
	}
      /* An optional chunk size follows the schedule kind.  */
      if (OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause))
	{
	  pp_character (buffer, ',');
	  dump_generic_node (buffer,
			     OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause),
			     spc, flags, false);
	}
      pp_character (buffer, ')');
      break;
    default:
      /* Should never happen.  */
      dump_generic_node (buffer, clause, spc, flags, false);
      break;
    }
}
/* Dump the list of OpenMP clauses chained from CLAUSE, each preceded by
   a single space.  BUFFER, SPC and FLAGS are as in dump_generic_node.  */

static void
dump_omp_clauses (pretty_printer *buffer, tree clause, int spc, int flags)
{
  for (; clause != NULL; clause = OMP_CLAUSE_CHAIN (clause))
    {
      pp_space (buffer);
      dump_omp_clause (buffer, clause, spc, flags);
    }
}
/* Dump the node NODE on the pretty_printer BUFFER, SPC spaces of indent.
FLAGS specifies details to show in the dump (see TDF_* in tree.h). If
IS_STMT is true, the object printed is considered to be a statement
and it is terminated by ';' if appropriate. */
int
dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
bool is_stmt)
{
tree type;
tree op0, op1;
const char *str;
bool is_expr;
if (node == NULL_TREE)
return spc;
is_expr = EXPR_P (node);
if (TREE_CODE (node) != ERROR_MARK
&& is_gimple_stmt (node)
&& (flags & TDF_VOPS)
&& stmt_ann (node)
&& TREE_CODE (node) != PHI_NODE)
dump_vops (buffer, node, spc, flags);
if (is_stmt && (flags & TDF_STMTADDR))
pp_printf (buffer, "<&%p> ", (void *)node);
if ((flags & TDF_LINENO) && EXPR_HAS_LOCATION (node))
{
expanded_location xloc = expand_location (EXPR_LOCATION (node));
pp_character (buffer, '[');
if (xloc.file)
{
pp_string (buffer, xloc.file);
pp_string (buffer, " : ");
}
pp_decimal_int (buffer, xloc.line);
pp_string (buffer, "] ");
}
switch (TREE_CODE (node))
{
case ERROR_MARK:
pp_string (buffer, "<<< error >>>");
break;
case IDENTIFIER_NODE:
pp_tree_identifier (buffer, node);
break;
case TREE_LIST:
while (node && node != error_mark_node)
{
if (TREE_PURPOSE (node))
{
dump_generic_node (buffer, TREE_PURPOSE (node), spc, flags, false);
pp_space (buffer);
}
dump_generic_node (buffer, TREE_VALUE (node), spc, flags, false);
node = TREE_CHAIN (node);
if (node && TREE_CODE (node) == TREE_LIST)
{
pp_character (buffer, ',');
pp_space (buffer);
}
}
break;
case TREE_BINFO:
dump_generic_node (buffer, BINFO_TYPE (node), spc, flags, false);
case TREE_VEC:
{
size_t i;
if (TREE_VEC_LENGTH (node) > 0)
{
size_t len = TREE_VEC_LENGTH (node);
for (i = 0; i < len - 1; i++)
{
dump_generic_node (buffer, TREE_VEC_ELT (node, i), spc, flags,
false);
pp_character (buffer, ',');
pp_space (buffer);
}
dump_generic_node (buffer, TREE_VEC_ELT (node, len - 1), spc,
flags, false);
}
}
break;
case VOID_TYPE:
case INTEGER_TYPE:
case REAL_TYPE:
case COMPLEX_TYPE:
case VECTOR_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
{
unsigned int quals = TYPE_QUALS (node);
enum tree_code_class class;
if (quals & TYPE_QUAL_CONST)
pp_string (buffer, "const ");
else if (quals & TYPE_QUAL_VOLATILE)
pp_string (buffer, "volatile ");
else if (quals & TYPE_QUAL_RESTRICT)
pp_string (buffer, "restrict ");
class = TREE_CODE_CLASS (TREE_CODE (node));
if (class == tcc_declaration)
{
if (DECL_NAME (node))
dump_decl_name (buffer, node, flags);
else
pp_string (buffer, "<unnamed type decl>");
}
else if (class == tcc_type)
{
if (TYPE_NAME (node))
{
if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE)
pp_tree_identifier (buffer, TYPE_NAME (node));
else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL
&& DECL_NAME (TYPE_NAME (node)))
dump_decl_name (buffer, TYPE_NAME (node), flags);
else
pp_string (buffer, "<unnamed type>");
}
else if (TREE_CODE (node) == VECTOR_TYPE)
{
pp_string (buffer, "vector ");
dump_generic_node (buffer, TREE_TYPE (node),
spc, flags, false);
}
else
pp_string (buffer, "<unnamed type>");
}
break;
}
case POINTER_TYPE:
case REFERENCE_TYPE:
str = (TREE_CODE (node) == POINTER_TYPE ? "*" : "&");
if (TREE_CODE (TREE_TYPE (node)) == FUNCTION_TYPE)
{
tree fnode = TREE_TYPE (node);
dump_generic_node (buffer, TREE_TYPE (fnode), spc, flags, false);
pp_space (buffer);
pp_character (buffer, '(');
pp_string (buffer, str);
if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node)))
dump_decl_name (buffer, TYPE_NAME (node), flags);
else
pp_printf (buffer, "<T%x>", TYPE_UID (node));
pp_character (buffer, ')');
dump_function_declaration (buffer, fnode, spc, flags);
}
else
{
unsigned int quals = TYPE_QUALS (node);
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
pp_space (buffer);
pp_string (buffer, str);
if (quals & TYPE_QUAL_CONST)
pp_string (buffer, " const");
else if (quals & TYPE_QUAL_VOLATILE)
pp_string (buffer, "volatile");
else if (quals & TYPE_QUAL_RESTRICT)
pp_string (buffer, " restrict");
if (TYPE_REF_CAN_ALIAS_ALL (node))
pp_string (buffer, " {ref-all}");
}
break;
case OFFSET_TYPE:
NIY;
break;
case METHOD_TYPE:
dump_decl_name (buffer, TYPE_NAME (TYPE_METHOD_BASETYPE (node)), flags);
pp_string (buffer, "::");
break;
case TARGET_MEM_REF:
{
const char *sep = "";
tree tmp;
pp_string (buffer, "MEM[");
tmp = TMR_SYMBOL (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "symbol: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
tmp = TMR_BASE (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "base: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
tmp = TMR_INDEX (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "index: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
tmp = TMR_STEP (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "step: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
tmp = TMR_OFFSET (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "offset: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
pp_string (buffer, "]");
if (flags & TDF_DETAILS)
{
pp_string (buffer, "{");
dump_generic_node (buffer, TMR_ORIGINAL (node), spc, flags,
false);
pp_string (buffer, "}");
}
}
break;
case ARRAY_TYPE:
{
tree tmp;
/* Print the innermost component type. */
for (tmp = TREE_TYPE (node); TREE_CODE (tmp) == ARRAY_TYPE;
tmp = TREE_TYPE (tmp))
;
dump_generic_node (buffer, tmp, spc, flags, false);
/* Print the dimensions. */
for (tmp = node; TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp))
dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags);
break;
}
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
/* Print the name of the structure. */
if (TREE_CODE (node) == RECORD_TYPE)
pp_string (buffer, "struct ");
else if (TREE_CODE (node) == UNION_TYPE)
pp_string (buffer, "union ");
if (TYPE_NAME (node))
dump_generic_node (buffer, TYPE_NAME (node), spc, flags, false);
else
print_struct_decl (buffer, node, spc, flags);
break;
case LANG_TYPE:
NIY;
break;
case INTEGER_CST:
if (TREE_CODE (TREE_TYPE (node)) == POINTER_TYPE)
{
/* In the case of a pointer, one may want to divide by the
size of the pointed-to type. Unfortunately, this is not
straightforward. The C front-end maps expressions
(int *) 5
int *p; (p + 5)
in such a way that the two INTEGER_CST nodes for "5" have
different values but identical types. In the latter
case, the 5 is multiplied by sizeof (int) in c-common.c
(pointer_int_sum) to convert it to a byte address, and
yet the type of the node is left unchanged. Argh. What
is consistent though is that the number value corresponds
to bytes (UNITS) offset.
NB: Neither of the following divisors can be trivially
used to recover the original literal:
TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node)))
TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */
pp_wide_integer (buffer, TREE_INT_CST_LOW (node));
pp_string (buffer, "B"); /* pseudo-unit */
}
else if (! host_integerp (node, 0))
{
tree val = node;
if (tree_int_cst_sgn (val) < 0)
{
pp_character (buffer, '-');
val = build_int_cst_wide (NULL_TREE,
-TREE_INT_CST_LOW (val),
~TREE_INT_CST_HIGH (val)
+ !TREE_INT_CST_LOW (val));
}
/* Would "%x%0*x" or "%x%*0x" get zero-padding on all
systems? */
{
static char format[10]; /* "%x%09999x\0" */
if (!format[0])
sprintf (format, "%%x%%0%dx", HOST_BITS_PER_INT / 4);
sprintf (pp_buffer (buffer)->digit_buffer, format,
TREE_INT_CST_HIGH (val),
TREE_INT_CST_LOW (val));
pp_string (buffer, pp_buffer (buffer)->digit_buffer);
}
}
else
pp_wide_integer (buffer, TREE_INT_CST_LOW (node));
break;
case REAL_CST:
/* Code copied from print_node. */
{
REAL_VALUE_TYPE d;
if (TREE_OVERFLOW (node))
pp_string (buffer, " overflow");
#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
d = TREE_REAL_CST (node);
if (REAL_VALUE_ISINF (d))
pp_string (buffer, " Inf");
else if (REAL_VALUE_ISNAN (d))
pp_string (buffer, " Nan");
else
{
char string[100];
real_to_decimal (string, &d, sizeof (string), 0, 1);
pp_string (buffer, string);
}
#else
{
HOST_WIDE_INT i;
unsigned char *p = (unsigned char *) &TREE_REAL_CST (node);
pp_string (buffer, "0x");
for (i = 0; i < sizeof TREE_REAL_CST (node); i++)
output_formatted_integer (buffer, "%02x", *p++);
}
#endif
break;
}
case COMPLEX_CST:
pp_string (buffer, "__complex__ (");
dump_generic_node (buffer, TREE_REALPART (node), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_IMAGPART (node), spc, flags, false);
pp_string (buffer, ")");
break;
case STRING_CST:
pp_string (buffer, "\"");
pretty_print_string (buffer, TREE_STRING_POINTER (node));
pp_string (buffer, "\"");
break;
case VECTOR_CST:
{
tree elt;
pp_string (buffer, "{ ");
for (elt = TREE_VECTOR_CST_ELTS (node); elt; elt = TREE_CHAIN (elt))
{
dump_generic_node (buffer, TREE_VALUE (elt), spc, flags, false);
if (TREE_CHAIN (elt))
pp_string (buffer, ", ");
}
pp_string (buffer, " }");
}
break;
case FUNCTION_TYPE:
break;
case FUNCTION_DECL:
case CONST_DECL:
dump_decl_name (buffer, node, flags);
break;
case LABEL_DECL:
if (DECL_NAME (node))
dump_decl_name (buffer, node, flags);
else if (LABEL_DECL_UID (node) != -1)
pp_printf (buffer, "<L" HOST_WIDE_INT_PRINT_DEC ">",
LABEL_DECL_UID (node));
else
pp_printf (buffer, "<D%u>", DECL_UID (node));
break;
case TYPE_DECL:
if (DECL_IS_BUILTIN (node))
{
/* Don't print the declaration of built-in types. */
break;
}
if (DECL_NAME (node))
dump_decl_name (buffer, node, flags);
else
{
if ((TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (node)) == UNION_TYPE)
&& TYPE_METHODS (TREE_TYPE (node)))
{
/* The type is a C++ class: all structures have at least
4 methods. */
pp_string (buffer, "class ");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
}
else
{
pp_string (buffer,
(TREE_CODE (TREE_TYPE (node)) == UNION_TYPE
? "union" : "struct "));
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
}
}
break;
case SYMBOL_MEMORY_TAG:
case NAME_MEMORY_TAG:
case STRUCT_FIELD_TAG:
case VAR_DECL:
case PARM_DECL:
case FIELD_DECL:
case NAMESPACE_DECL:
dump_decl_name (buffer, node, flags);
break;
case RESULT_DECL:
pp_string (buffer, "<retval>");
break;
case COMPONENT_REF:
op0 = TREE_OPERAND (node, 0);
str = ".";
if (TREE_CODE (op0) == INDIRECT_REF)
{
op0 = TREE_OPERAND (op0, 0);
str = "->";
}
if (op_prio (op0) < op_prio (node))
pp_character (buffer, '(');
dump_generic_node (buffer, op0, spc, flags, false);
if (op_prio (op0) < op_prio (node))
pp_character (buffer, ')');
pp_string (buffer, str);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
if (TREE_CODE (op0) != VALUE_HANDLE)
{
op0 = component_ref_field_offset (node);
if (op0 && TREE_CODE (op0) != INTEGER_CST)
{
pp_string (buffer, "{off: ");
dump_generic_node (buffer, op0, spc, flags, false);
pp_character (buffer, '}');
}
}
break;
case BIT_FIELD_REF:
pp_string (buffer, "BIT_FIELD_REF <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, ">");
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
op0 = TREE_OPERAND (node, 0);
if (op_prio (op0) < op_prio (node))
pp_character (buffer, '(');
dump_generic_node (buffer, op0, spc, flags, false);
if (op_prio (op0) < op_prio (node))
pp_character (buffer, ')');
pp_character (buffer, '[');
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
if (TREE_CODE (node) == ARRAY_RANGE_REF)
pp_string (buffer, " ...");
pp_character (buffer, ']');
/* LLVM LOCAL begin */
#ifdef ENABLE_LLVM
/* Support the "pointers in ARRAY_REF" extension */
if (TREE_CODE (TREE_TYPE (op0)) != ARRAY_TYPE)
break;
#endif
/* LLVM LOCAL end */
op0 = array_ref_low_bound (node);
op1 = array_ref_element_size (node);
if (!integer_zerop (op0)
|| (TYPE_SIZE_UNIT (TREE_TYPE (node))
&& !operand_equal_p (op1, TYPE_SIZE_UNIT (TREE_TYPE (node)), 0)))
{
pp_string (buffer, "{lb: ");
dump_generic_node (buffer, op0, spc, flags, false);
pp_string (buffer, " sz: ");
dump_generic_node (buffer, op1, spc, flags, false);
pp_character (buffer, '}');
}
break;
case CONSTRUCTOR:
{
unsigned HOST_WIDE_INT ix;
tree field, val;
bool is_struct_init = FALSE;
pp_character (buffer, '{');
if (TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (node)) == UNION_TYPE)
is_struct_init = TRUE;
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val)
{
if (field && is_struct_init)
{
pp_character (buffer, '.');
dump_generic_node (buffer, field, spc, flags, false);
pp_string (buffer, "=");
}
if (val && TREE_CODE (val) == ADDR_EXPR)
if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL)
val = TREE_OPERAND (val, 0);
if (val && TREE_CODE (val) == FUNCTION_DECL)
dump_decl_name (buffer, val, flags);
else
dump_generic_node (buffer, val, spc, flags, false);
if (ix != VEC_length (constructor_elt, CONSTRUCTOR_ELTS (node)) - 1)
{
pp_character (buffer, ',');
pp_space (buffer);
}
}
pp_character (buffer, '}');
}
break;
case COMPOUND_EXPR:
{
tree *tp;
if (flags & TDF_SLIM)
{
pp_string (buffer, "<COMPOUND_EXPR>");
break;
}
dump_generic_node (buffer, TREE_OPERAND (node, 0),
spc, flags, !(flags & TDF_SLIM));
if (flags & TDF_SLIM)
newline_and_indent (buffer, spc);
else
{
pp_character (buffer, ',');
pp_space (buffer);
}
for (tp = &TREE_OPERAND (node, 1);
TREE_CODE (*tp) == COMPOUND_EXPR;
tp = &TREE_OPERAND (*tp, 1))
{
dump_generic_node (buffer, TREE_OPERAND (*tp, 0),
spc, flags, !(flags & TDF_SLIM));
if (flags & TDF_SLIM)
newline_and_indent (buffer, spc);
else
{
pp_character (buffer, ',');
pp_space (buffer);
}
}
dump_generic_node (buffer, *tp, spc, flags, !(flags & TDF_SLIM));
}
break;
case STATEMENT_LIST:
{
tree_stmt_iterator si;
bool first = true;
if (flags & TDF_SLIM)
{
pp_string (buffer, "<STATEMENT_LIST>");
break;
}
for (si = tsi_start (node); !tsi_end_p (si); tsi_next (&si))
{
if (!first)
newline_and_indent (buffer, spc);
else
first = false;
dump_generic_node (buffer, tsi_stmt (si), spc, flags, true);
}
}
break;
case MODIFY_EXPR:
case INIT_EXPR:
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (buffer);
pp_character (buffer, '=');
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
break;
case TARGET_EXPR:
pp_string (buffer, "TARGET_EXPR <");
dump_generic_node (buffer, TARGET_EXPR_SLOT (node), spc, flags, false);
pp_character (buffer, ',');
pp_space (buffer);
dump_generic_node (buffer, TARGET_EXPR_INITIAL (node), spc, flags, false);
pp_character (buffer, '>');
break;
case DECL_EXPR:
print_declaration (buffer, DECL_EXPR_DECL (node), spc, flags);
is_stmt = false;
break;
case COND_EXPR:
if (TREE_TYPE (node) == NULL || TREE_TYPE (node) == void_type_node)
{
pp_string (buffer, "if (");
dump_generic_node (buffer, COND_EXPR_COND (node), spc, flags, false);
pp_character (buffer, ')');
/* The lowered cond_exprs should always be printed in full. */
if (COND_EXPR_THEN (node)
&& (IS_EMPTY_STMT (COND_EXPR_THEN (node))
|| TREE_CODE (COND_EXPR_THEN (node)) == GOTO_EXPR)
&& COND_EXPR_ELSE (node)
&& (IS_EMPTY_STMT (COND_EXPR_ELSE (node))
|| TREE_CODE (COND_EXPR_ELSE (node)) == GOTO_EXPR))
{
pp_space (buffer);
dump_generic_node (buffer, COND_EXPR_THEN (node), 0, flags, true);
pp_string (buffer, " else ");
dump_generic_node (buffer, COND_EXPR_ELSE (node), 0, flags, true);
}
else if (!(flags & TDF_SLIM))
{
/* Output COND_EXPR_THEN. */
if (COND_EXPR_THEN (node))
{
newline_and_indent (buffer, spc+2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, COND_EXPR_THEN (node), spc+4,
flags, true);
newline_and_indent (buffer, spc+2);
pp_character (buffer, '}');
}
/* Output COND_EXPR_ELSE. */
if (COND_EXPR_ELSE (node))
{
newline_and_indent (buffer, spc);
pp_string (buffer, "else");
newline_and_indent (buffer, spc+2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, COND_EXPR_ELSE (node), spc+4,
flags, true);
newline_and_indent (buffer, spc+2);
pp_character (buffer, '}');
}
}
is_expr = false;
}
else
{
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (buffer);
pp_character (buffer, '?');
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_space (buffer);
pp_character (buffer, ':');
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
}
break;
case BIND_EXPR:
pp_character (buffer, '{');
if (!(flags & TDF_SLIM))
{
if (BIND_EXPR_VARS (node))
{
pp_newline (buffer);
for (op0 = BIND_EXPR_VARS (node); op0; op0 = TREE_CHAIN (op0))
{
print_declaration (buffer, op0, spc+2, flags);
pp_newline (buffer);
}
}
newline_and_indent (buffer, spc+2);
dump_generic_node (buffer, BIND_EXPR_BODY (node), spc+2, flags, true);
newline_and_indent (buffer, spc);
pp_character (buffer, '}');
}
is_expr = false;
break;
case CALL_EXPR:
print_call_name (buffer, node);
/* Print parameters. */
pp_space (buffer);
pp_character (buffer, '(');
op1 = TREE_OPERAND (node, 1);
if (op1)
dump_generic_node (buffer, op1, spc, flags, false);
pp_character (buffer, ')');
op1 = TREE_OPERAND (node, 2);
if (op1)
{
pp_string (buffer, " [static-chain: ");
dump_generic_node (buffer, op1, spc, flags, false);
pp_character (buffer, ']');
}
if (CALL_EXPR_RETURN_SLOT_OPT (node))
pp_string (buffer, " [return slot optimization]");
if (CALL_EXPR_TAILCALL (node))
pp_string (buffer, " [tail call]");
break;
case WITH_CLEANUP_EXPR:
NIY;
break;
case CLEANUP_POINT_EXPR:
pp_string (buffer, "<<cleanup_point ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">>");
break;
case PLACEHOLDER_EXPR:
pp_string (buffer, "<PLACEHOLDER_EXPR ");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
pp_character (buffer, '>');
break;
/* Binary arithmetic and logic expressions. */
case WIDEN_SUM_EXPR:
case WIDEN_MULT_EXPR:
case MULT_EXPR:
case PLUS_EXPR:
case MINUS_EXPR:
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case TRUNC_MOD_EXPR:
case CEIL_MOD_EXPR:
case FLOOR_MOD_EXPR:
case ROUND_MOD_EXPR:
case RDIV_EXPR:
case EXACT_DIV_EXPR:
case LSHIFT_EXPR:
case RSHIFT_EXPR:
case LROTATE_EXPR:
case RROTATE_EXPR:
case VEC_LSHIFT_EXPR:
case VEC_RSHIFT_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case BIT_AND_EXPR:
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
case LT_EXPR:
case LE_EXPR:
case GT_EXPR:
case GE_EXPR:
case EQ_EXPR:
case NE_EXPR:
case UNLT_EXPR:
case UNLE_EXPR:
case UNGT_EXPR:
case UNGE_EXPR:
case UNEQ_EXPR:
case LTGT_EXPR:
case ORDERED_EXPR:
case UNORDERED_EXPR:
{
const char *op = op_symbol (node);
op0 = TREE_OPERAND (node, 0);
op1 = TREE_OPERAND (node, 1);
/* When the operands are expressions with less priority,
keep semantics of the tree representation. */
if (op_prio (op0) < op_prio (node))
{
pp_character (buffer, '(');
dump_generic_node (buffer, op0, spc, flags, false);
pp_character (buffer, ')');
}
else
dump_generic_node (buffer, op0, spc, flags, false);
pp_space (buffer);
pp_string (buffer, op);
pp_space (buffer);
/* When the operands are expressions with less priority,
keep semantics of the tree representation. */
if (op_prio (op1) < op_prio (node))
{
pp_character (buffer, '(');
dump_generic_node (buffer, op1, spc, flags, false);
pp_character (buffer, ')');
}
else
dump_generic_node (buffer, op1, spc, flags, false);
}
break;
/* Unary arithmetic and logic expressions. */
case NEGATE_EXPR:
case BIT_NOT_EXPR:
case TRUTH_NOT_EXPR:
case ADDR_EXPR:
case PREDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case ALIGN_INDIRECT_REF:
case MISALIGNED_INDIRECT_REF:
case INDIRECT_REF:
if (TREE_CODE (node) == ADDR_EXPR
&& (TREE_CODE (TREE_OPERAND (node, 0)) == STRING_CST
|| TREE_CODE (TREE_OPERAND (node, 0)) == FUNCTION_DECL))
; /* Do not output '&' for strings and function pointers. */
else
pp_string (buffer, op_symbol (node));
if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node))
{
pp_character (buffer, '(');
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, ')');
}
else
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
if (TREE_CODE (node) == MISALIGNED_INDIRECT_REF)
{
pp_string (buffer, "{misalignment: ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_character (buffer, '}');
}
break;
case POSTDECREMENT_EXPR:
case POSTINCREMENT_EXPR:
if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node))
{
pp_character (buffer, '(');
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, ')');
}
else
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, op_symbol (node));
break;
case MIN_EXPR:
pp_string (buffer, "MIN_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_character (buffer, '>');
break;
case MAX_EXPR:
pp_string (buffer, "MAX_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_character (buffer, '>');
break;
case ABS_EXPR:
pp_string (buffer, "ABS_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, '>');
break;
case RANGE_EXPR:
NIY;
break;
case FIX_TRUNC_EXPR:
case FIX_CEIL_EXPR:
case FIX_FLOOR_EXPR:
case FIX_ROUND_EXPR:
case FLOAT_EXPR:
case CONVERT_EXPR:
case NOP_EXPR:
type = TREE_TYPE (node);
op0 = TREE_OPERAND (node, 0);
if (type != TREE_TYPE (op0))
{
pp_character (buffer, '(');
dump_generic_node (buffer, type, spc, flags, false);
pp_string (buffer, ") ");
}
if (op_prio (op0) < op_prio (node))
pp_character (buffer, '(');
dump_generic_node (buffer, op0, spc, flags, false);
if (op_prio (op0) < op_prio (node))
pp_character (buffer, ')');
break;
case VIEW_CONVERT_EXPR:
pp_string (buffer, "VIEW_CONVERT_EXPR<");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
pp_string (buffer, ">(");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, ')');
break;
case NON_LVALUE_EXPR:
pp_string (buffer, "NON_LVALUE_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, '>');
break;
case SAVE_EXPR:
pp_string (buffer, "SAVE_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, '>');
break;
case COMPLEX_EXPR:
pp_string (buffer, "COMPLEX_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ">");
break;
case CONJ_EXPR:
pp_string (buffer, "CONJ_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">");
break;
case REALPART_EXPR:
pp_string (buffer, "REALPART_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">");
break;
case IMAGPART_EXPR:
pp_string (buffer, "IMAGPART_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">");
break;
case VA_ARG_EXPR:
pp_string (buffer, "VA_ARG_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">");
break;
case TRY_FINALLY_EXPR:
case TRY_CATCH_EXPR:
pp_string (buffer, "try");
newline_and_indent (buffer, spc+2);
pp_string (buffer, "{");
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_string (buffer, "}");
newline_and_indent (buffer, spc);
pp_string (buffer,
(TREE_CODE (node) == TRY_CATCH_EXPR) ? "catch" : "finally");
newline_and_indent (buffer, spc+2);
pp_string (buffer, "{");
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_string (buffer, "}");
is_expr = false;
break;
case CATCH_EXPR:
pp_string (buffer, "catch (");
dump_generic_node (buffer, CATCH_TYPES (node), spc+2, flags, false);
pp_string (buffer, ")");
newline_and_indent (buffer, spc+2);
pp_string (buffer, "{");
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, CATCH_BODY (node), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_string (buffer, "}");
is_expr = false;
break;
case EH_FILTER_EXPR:
pp_string (buffer, "<<<eh_filter (");
dump_generic_node (buffer, EH_FILTER_TYPES (node), spc+2, flags, false);
pp_string (buffer, ")>>>");
newline_and_indent (buffer, spc+2);
pp_string (buffer, "{");
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, EH_FILTER_FAILURE (node), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_string (buffer, "}");
is_expr = false;
break;
case LABEL_EXPR:
op0 = TREE_OPERAND (node, 0);
/* If this is for break or continue, don't bother printing it. */
if (DECL_NAME (op0))
{
const char *name = IDENTIFIER_POINTER (DECL_NAME (op0));
if (strcmp (name, "break") == 0
|| strcmp (name, "continue") == 0)
break;
}
dump_generic_node (buffer, op0, spc, flags, false);
pp_character (buffer, ':');
if (DECL_NONLOCAL (op0))
pp_string (buffer, " [non-local]");
break;
case EXC_PTR_EXPR:
pp_string (buffer, "<<<exception object>>>");
break;
case FILTER_EXPR:
pp_string (buffer, "<<<filter object>>>");
break;
case LOOP_EXPR:
pp_string (buffer, "while (1)");
if (!(flags & TDF_SLIM))
{
newline_and_indent (buffer, spc+2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, LOOP_EXPR_BODY (node), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_character (buffer, '}');
}
is_expr = false;
break;
case RETURN_EXPR:
pp_string (buffer, "return");
op0 = TREE_OPERAND (node, 0);
if (op0)
{
pp_space (buffer);
if (TREE_CODE (op0) == MODIFY_EXPR)
dump_generic_node (buffer, TREE_OPERAND (op0, 1), spc, flags, false);
else
dump_generic_node (buffer, op0, spc, flags, false);
}
break;
case EXIT_EXPR:
pp_string (buffer, "if (");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ") break");
break;
case SWITCH_EXPR:
pp_string (buffer, "switch (");
dump_generic_node (buffer, SWITCH_COND (node), spc, flags, false);
pp_character (buffer, ')');
if (!(flags & TDF_SLIM))
{
newline_and_indent (buffer, spc+2);
pp_character (buffer, '{');
if (SWITCH_BODY (node))
{
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, SWITCH_BODY (node), spc+4, flags,
true);
}
else
{
tree vec = SWITCH_LABELS (node);
size_t i, n = TREE_VEC_LENGTH (vec);
for (i = 0; i < n; ++i)
{
tree elt = TREE_VEC_ELT (vec, i);
newline_and_indent (buffer, spc+4);
if (elt)
{
dump_generic_node (buffer, elt, spc+4, flags, false);
pp_string (buffer, " goto ");
dump_generic_node (buffer, CASE_LABEL (elt), spc+4,
flags, true);
pp_semicolon (buffer);
}
else
pp_string (buffer, "case ???: goto ???;");
}
}
newline_and_indent (buffer, spc+2);
pp_character (buffer, '}');
}
is_expr = false;
break;
case GOTO_EXPR:
op0 = GOTO_DESTINATION (node);
if (TREE_CODE (op0) != SSA_NAME && DECL_P (op0) && DECL_NAME (op0))
{
const char *name = IDENTIFIER_POINTER (DECL_NAME (op0));
if (strcmp (name, "break") == 0
|| strcmp (name, "continue") == 0)
{
pp_string (buffer, name);
break;
}
}
pp_string (buffer, "goto ");
dump_generic_node (buffer, op0, spc, flags, false);
break;
case RESX_EXPR:
pp_string (buffer, "resx ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
break;
case ASM_EXPR:
pp_string (buffer, "__asm__");
if (ASM_VOLATILE_P (node))
pp_string (buffer, " __volatile__");
pp_character (buffer, '(');
dump_generic_node (buffer, ASM_STRING (node), spc, flags, false);
pp_character (buffer, ':');
dump_generic_node (buffer, ASM_OUTPUTS (node), spc, flags, false);
pp_character (buffer, ':');
dump_generic_node (buffer, ASM_INPUTS (node), spc, flags, false);
if (ASM_CLOBBERS (node))
{
pp_character (buffer, ':');
dump_generic_node (buffer, ASM_CLOBBERS (node), spc, flags, false);
}
pp_string (buffer, ")");
break;
case CASE_LABEL_EXPR:
if (CASE_LOW (node) && CASE_HIGH (node))
{
pp_string (buffer, "case ");
dump_generic_node (buffer, CASE_LOW (node), spc, flags, false);
pp_string (buffer, " ... ");
dump_generic_node (buffer, CASE_HIGH (node), spc, flags, false);
}
else if (CASE_LOW (node))
{
pp_string (buffer, "case ");
dump_generic_node (buffer, CASE_LOW (node), spc, flags, false);
}
else
pp_string (buffer, "default ");
pp_character (buffer, ':');
break;
case OBJ_TYPE_REF:
pp_string (buffer, "OBJ_TYPE_REF(");
dump_generic_node (buffer, OBJ_TYPE_REF_EXPR (node), spc, flags, false);
pp_character (buffer, ';');
dump_generic_node (buffer, OBJ_TYPE_REF_OBJECT (node), spc, flags, false);
pp_character (buffer, '-');
pp_character (buffer, '>');
dump_generic_node (buffer, OBJ_TYPE_REF_TOKEN (node), spc, flags, false);
pp_character (buffer, ')');
break;
case PHI_NODE:
{
int i;
dump_generic_node (buffer, PHI_RESULT (node), spc, flags, false);
pp_string (buffer, " = PHI <");
for (i = 0; i < PHI_NUM_ARGS (node); i++)
{
dump_generic_node (buffer, PHI_ARG_DEF (node, i), spc, flags, false);
pp_string (buffer, "(");
pp_decimal_int (buffer, PHI_ARG_EDGE (node, i)->src->index);
pp_string (buffer, ")");
if (i < PHI_NUM_ARGS (node) - 1)
pp_string (buffer, ", ");
}
pp_string (buffer, ">;");
}
break;
case SSA_NAME:
dump_generic_node (buffer, SSA_NAME_VAR (node), spc, flags, false);
pp_string (buffer, "_");
pp_decimal_int (buffer, SSA_NAME_VERSION (node));
if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (node))
pp_string (buffer, "(ab)");
break;
case WITH_SIZE_EXPR:
pp_string (buffer, "WITH_SIZE_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ">");
break;
case VALUE_HANDLE:
pp_printf (buffer, "VH.%d", VALUE_HANDLE_ID (node));
break;
case ASSERT_EXPR:
pp_string (buffer, "ASSERT_EXPR <");
dump_generic_node (buffer, ASSERT_EXPR_VAR (node), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, ASSERT_EXPR_COND (node), spc, flags, false);
pp_string (buffer, ">");
break;
case SCEV_KNOWN:
pp_string (buffer, "scev_known");
break;
case SCEV_NOT_KNOWN:
pp_string (buffer, "scev_not_known");
break;
case POLYNOMIAL_CHREC:
pp_string (buffer, "{");
dump_generic_node (buffer, CHREC_LEFT (node), spc, flags, false);
pp_string (buffer, ", +, ");
dump_generic_node (buffer, CHREC_RIGHT (node), spc, flags, false);
pp_string (buffer, "}_");
dump_generic_node (buffer, CHREC_VAR (node), spc, flags, false);
is_stmt = false;
break;
case REALIGN_LOAD_EXPR:
pp_string (buffer, "REALIGN_LOAD <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, ">");
break;
case VEC_COND_EXPR:
pp_string (buffer, " VEC_COND_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " , ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " , ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case DOT_PROD_EXPR:
pp_string (buffer, " DOT_PROD_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " , ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " , ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case OMP_PARALLEL:
pp_string (buffer, "#pragma omp parallel");
dump_omp_clauses (buffer, OMP_PARALLEL_CLAUSES (node), spc, flags);
if (OMP_PARALLEL_FN (node))
{
pp_string (buffer, " [child fn: ");
dump_generic_node (buffer, OMP_PARALLEL_FN (node), spc, flags, false);
pp_string (buffer, " (");
if (OMP_PARALLEL_DATA_ARG (node))
dump_generic_node (buffer, OMP_PARALLEL_DATA_ARG (node), spc, flags,
false);
else
pp_string (buffer, "???");
pp_string (buffer, ")]");
}
dump_omp_body:
if (!(flags & TDF_SLIM) && OMP_BODY (node))
{
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc + 4);
dump_generic_node (buffer, OMP_BODY (node), spc + 4, flags, false);
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '}');
}
is_expr = false;
break;
case OMP_FOR:
pp_string (buffer, "#pragma omp for");
dump_omp_clauses (buffer, OMP_FOR_CLAUSES (node), spc, flags);
if (!(flags & TDF_SLIM))
{
if (OMP_FOR_PRE_BODY (node))
{
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '{');
spc += 4;
newline_and_indent (buffer, spc);
dump_generic_node (buffer, OMP_FOR_PRE_BODY (node),
spc, flags, false);
}
newline_and_indent (buffer, spc);
pp_string (buffer, "for (");
dump_generic_node (buffer, OMP_FOR_INIT (node), spc, flags, false);
pp_string (buffer, "; ");
dump_generic_node (buffer, OMP_FOR_COND (node), spc, flags, false);
pp_string (buffer, "; ");
dump_generic_node (buffer, OMP_FOR_INCR (node), spc, flags, false);
pp_string (buffer, ")");
if (OMP_FOR_BODY (node))
{
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc + 4);
dump_generic_node (buffer, OMP_FOR_BODY (node), spc + 4, flags,
false);
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '}');
}
if (OMP_FOR_PRE_BODY (node))
{
spc -= 4;
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '}');
}
}
is_expr = false;
break;
case OMP_SECTIONS:
pp_string (buffer, "#pragma omp sections");
dump_omp_clauses (buffer, OMP_SECTIONS_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_SECTION:
pp_string (buffer, "#pragma omp section");
goto dump_omp_body;
case OMP_MASTER:
pp_string (buffer, "#pragma omp master");
goto dump_omp_body;
case OMP_ORDERED:
pp_string (buffer, "#pragma omp ordered");
goto dump_omp_body;
case OMP_CRITICAL:
pp_string (buffer, "#pragma omp critical");
if (OMP_CRITICAL_NAME (node))
{
pp_space (buffer);
pp_character (buffer, '(');
dump_generic_node (buffer, OMP_CRITICAL_NAME (node), spc,
flags, false);
pp_character (buffer, ')');
}
goto dump_omp_body;
case OMP_ATOMIC:
pp_string (buffer, "#pragma omp atomic");
newline_and_indent (buffer, spc + 2);
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (buffer);
pp_character (buffer, '=');
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
break;
case OMP_SINGLE:
pp_string (buffer, "#pragma omp single");
dump_omp_clauses (buffer, OMP_SINGLE_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_RETURN:
pp_string (buffer, "OMP_RETURN");
if (OMP_RETURN_NOWAIT (node))
pp_string (buffer, " [nowait]");
is_expr = false;
break;
case OMP_CONTINUE:
pp_string (buffer, "OMP_CONTINUE");
is_expr = false;
break;
case OMP_CLAUSE:
dump_omp_clause (buffer, node, spc, flags);
is_expr = false;
break;
case REDUC_MAX_EXPR:
pp_string (buffer, " REDUC_MAX_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case REDUC_MIN_EXPR:
pp_string (buffer, " REDUC_MIN_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case REDUC_PLUS_EXPR:
pp_string (buffer, " REDUC_PLUS_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case BLOCK:
{
tree t;
pp_string (buffer, "BLOCK");
if (BLOCK_ABSTRACT (node))
pp_string (buffer, " [abstract]");
if (TREE_ASM_WRITTEN (node))
pp_string (buffer, " [written]");
newline_and_indent (buffer, spc + 2);
if (BLOCK_SUPERCONTEXT (node))
{
pp_string (buffer, "SUPERCONTEXT: ");
if (TREE_CODE (BLOCK_SUPERCONTEXT (node)) == BLOCK)
pp_printf (buffer, "BLOCK %p",
(void *)BLOCK_SUPERCONTEXT (node));
else
dump_generic_node (buffer, BLOCK_SUPERCONTEXT (node), 0, flags,
false);
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_SUBBLOCKS (node))
{
pp_string (buffer, "SUBBLOCKS: ");
for (t = BLOCK_SUBBLOCKS (node); t; t = BLOCK_CHAIN (t))
pp_printf (buffer, "%p ", (void *)t);
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_VARS (node))
{
pp_string (buffer, "VARS: ");
for (t = BLOCK_VARS (node); t; t = TREE_CHAIN (t))
{
dump_generic_node (buffer, t, 0, flags, false);
pp_string (buffer, " ");
}
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_ABSTRACT_ORIGIN (node))
{
pp_string (buffer, "ABSTRACT_ORIGIN: ");
if (TREE_CODE (BLOCK_ABSTRACT_ORIGIN (node)) == BLOCK)
pp_printf (buffer, "BLOCK %p",
(void *)BLOCK_ABSTRACT_ORIGIN (node));
else
dump_generic_node (buffer, BLOCK_ABSTRACT_ORIGIN (node), 0, flags,
false);
newline_and_indent (buffer, spc + 2);
}
}
break;
default:
NIY;
}
if (is_stmt && is_expr)
pp_semicolon (buffer);
pp_write_text_to_stream (buffer);
return spc;
}
/* Print the declaration of a variable T to BUFFER, indented SPC spaces
   and honoring the dump FLAGS. */
static void
print_declaration (pretty_printer *buffer, tree t, int spc, int flags)
{
INDENT (spc);
if (TREE_CODE (t) == TYPE_DECL)
pp_string (buffer, "typedef ");
if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL) && DECL_REGISTER (t))
pp_string (buffer, "register ");
if (TREE_PUBLIC (t) && DECL_EXTERNAL (t))
pp_string (buffer, "extern ");
else if (TREE_STATIC (t))
pp_string (buffer, "static ");
/* Print the type and name. */
if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
{
tree tmp;
/* Print array's type. */
tmp = TREE_TYPE (t);
while (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE)
tmp = TREE_TYPE (tmp);
dump_generic_node (buffer, TREE_TYPE (tmp), spc, flags, false);
/* Print variable's name. */
pp_space (buffer);
dump_generic_node (buffer, t, spc, flags, false);
/* Print the dimensions. */
tmp = TREE_TYPE (t);
while (TREE_CODE (tmp) == ARRAY_TYPE)
{
dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags);
tmp = TREE_TYPE (tmp);
}
}
else if (TREE_CODE (t) == FUNCTION_DECL)
{
dump_generic_node (buffer, TREE_TYPE (TREE_TYPE (t)), spc, flags, false);
pp_space (buffer);
dump_decl_name (buffer, t, flags);
dump_function_declaration (buffer, TREE_TYPE (t), spc, flags);
}
else
{
/* Print type declaration. */
dump_generic_node (buffer, TREE_TYPE (t), spc, flags, false);
/* Print variable's name. */
pp_space (buffer);
dump_generic_node (buffer, t, spc, flags, false);
}
if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t))
{
pp_string (buffer, " __asm__ ");
pp_character (buffer, '(');
dump_generic_node (buffer, DECL_ASSEMBLER_NAME (t), spc, flags, false);
pp_character (buffer, ')');
}
/* The initial value of a function serves to determine wether the function
is declared or defined. So the following does not apply to function
nodes. */
if (TREE_CODE (t) != FUNCTION_DECL)
{
/* Print the initial value. */
if (DECL_INITIAL (t))
{
pp_space (buffer);
pp_character (buffer, '=');
pp_space (buffer);
dump_generic_node (buffer, DECL_INITIAL (t), spc, flags, false);
}
}
if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
{
pp_string (buffer, " [value-expr: ");
dump_generic_node (buffer, DECL_VALUE_EXPR (t), spc, flags, false);
pp_character (buffer, ']');
}
pp_character (buffer, ';');
}
/* Prints a structure: name, fields, and methods.
   Emits "struct"/"union" plus the tag name (if any), then the field
   declarations between braces, indented by SPC.
   FIXME: Still incomplete.  */
static void
print_struct_decl (pretty_printer *buffer, tree node, int spc, int flags)
{
  /* Print the name of the structure.  */
  if (TYPE_NAME (node))
    {
      INDENT (spc);
      if (TREE_CODE (node) == RECORD_TYPE)
        pp_string (buffer, "struct ");
      else if ((TREE_CODE (node) == UNION_TYPE
                || TREE_CODE (node) == QUAL_UNION_TYPE))
        pp_string (buffer, "union ");

      dump_generic_node (buffer, TYPE_NAME (node), spc, 0, false);
    }

  /* Print the contents of the structure.  */
  pp_newline (buffer);
  INDENT (spc);
  pp_character (buffer, '{');
  pp_newline (buffer);

  /* Print the fields of the structure.  */
  {
    tree tmp;
    tmp = TYPE_FIELDS (node);
    while (tmp)
      {
        /* Avoid printing the structure recursively: skip a field whose
           type is this structure itself (a self-pointer is fine).  */
        /* FIXME : Not implemented correctly...,
           what about the case when we have a cycle in the contain graph? ...
           Maybe this could be solved by looking at the scope in which the
           structure was declared.  */
        if (TREE_TYPE (tmp) != node
            || (TREE_CODE (TREE_TYPE (tmp)) == POINTER_TYPE
                && TREE_TYPE (TREE_TYPE (tmp)) != node))
          {
            print_declaration (buffer, tmp, spc+2, flags);
            pp_newline (buffer);
          }
        tmp = TREE_CHAIN (tmp);
      }
  }
  INDENT (spc);
  pp_character (buffer, '}');
}
/* Return the priority of the operator OP.

   From lowest to highest precedence with either left-to-right (L-R)
   or right-to-left (R-L) associativity:

   1	[L-R]	,
   2	[R-L]	= += -= *= /= %= &= ^= |= <<= >>=
   3	[R-L]	?:
   4	[L-R]	||
   5	[L-R]	&&
   6	[L-R]	|
   7	[L-R]	^
   8	[L-R]	&
   9	[L-R]	== !=
   10	[L-R]	< <= > >=
   11	[L-R]	<< >>
   12	[L-R]	+ -
   13	[L-R]	* / %
   14	[R-L]	! ~ ++ -- + - * & (type) sizeof
   15	[L-R]	fn() [] -> .

   unary +, - and * have higher precedence than the corresponding binary
   operators.  */
static int
op_prio (tree op)
{
  if (op == NULL)
    return 9999;

  switch (TREE_CODE (op))
    {
    case TREE_LIST:
    case COMPOUND_EXPR:
    case BIND_EXPR:
      return 1;

    case MODIFY_EXPR:
    case INIT_EXPR:
      return 2;

    case COND_EXPR:
      return 3;

    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
      return 4;

    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
      return 5;

    case BIT_IOR_EXPR:
      return 6;

    case BIT_XOR_EXPR:
    case TRUTH_XOR_EXPR:
      return 7;

    case BIT_AND_EXPR:
      return 8;

    case EQ_EXPR:
    case NE_EXPR:
      return 9;

    /* Unordered/floating comparisons share relational precedence.  */
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
    case LTGT_EXPR:
    case ORDERED_EXPR:
    case UNORDERED_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      return 10;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return 11;

    case WIDEN_SUM_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      return 12;

    case WIDEN_MULT_EXPR:
    case DOT_PROD_EXPR:
    case MULT_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case RDIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return 13;

    case TRUTH_NOT_EXPR:
    case BIT_NOT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case NEGATE_EXPR:
    case ALIGN_INDIRECT_REF:
    case MISALIGNED_INDIRECT_REF:
    case INDIRECT_REF:
    case ADDR_EXPR:
    case FLOAT_EXPR:
    case NOP_EXPR:
    case CONVERT_EXPR:
    case FIX_TRUNC_EXPR:
    case FIX_CEIL_EXPR:
    case FIX_FLOOR_EXPR:
    case FIX_ROUND_EXPR:
    case TARGET_EXPR:
      return 14;

    case CALL_EXPR:
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
    case COMPONENT_REF:
      return 15;

    /* Special expressions: printed function-call style, so they never
       need parentheses around their operands.  */
    case MIN_EXPR:
    case MAX_EXPR:
    case ABS_EXPR:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case REDUC_MAX_EXPR:
    case REDUC_MIN_EXPR:
    case REDUC_PLUS_EXPR:
    case VEC_LSHIFT_EXPR:
    case VEC_RSHIFT_EXPR:
      return 16;

    /* Transparent wrappers: look through to the wrapped expression.  */
    case SAVE_EXPR:
    case NON_LVALUE_EXPR:
      return op_prio (TREE_OPERAND (op, 0));

    default:
      /* Return an arbitrarily high precedence to avoid surrounding single
         VAR_DECLs in ()s.  */
      return 9999;
    }
}
/* Return the printable symbol associated with tree code CODE
   (e.g. "+" for PLUS_EXPR).  Codes with no dedicated symbol yield
   the placeholder "<<< ??? >>>".  Pre/post inc/dec symbols carry a
   leading or trailing space so they attach to the correct side of the
   operand.  */
static const char *
op_symbol_1 (enum tree_code code)
{
  switch (code)
    {
    case MODIFY_EXPR:
      return "=";

    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
      return "||";

    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
      return "&&";

    case BIT_IOR_EXPR:
      return "|";

    case TRUTH_XOR_EXPR:
    case BIT_XOR_EXPR:
      return "^";

    case ADDR_EXPR:
    case BIT_AND_EXPR:
      return "&";

    case ORDERED_EXPR:
      return "ord";
    case UNORDERED_EXPR:
      return "unord";

    case EQ_EXPR:
      return "==";
    case UNEQ_EXPR:
      return "u==";

    case NE_EXPR:
      return "!=";

    case LT_EXPR:
      return "<";
    case UNLT_EXPR:
      return "u<";

    case LE_EXPR:
      return "<=";
    case UNLE_EXPR:
      return "u<=";

    case GT_EXPR:
      return ">";
    case UNGT_EXPR:
      return "u>";

    case GE_EXPR:
      return ">=";
    case UNGE_EXPR:
      return "u>=";

    case LTGT_EXPR:
      return "<>";

    case LSHIFT_EXPR:
      return "<<";

    case RSHIFT_EXPR:
      return ">>";

    case LROTATE_EXPR:
      return "r<<";

    case RROTATE_EXPR:
      return "r>>";

    case VEC_LSHIFT_EXPR:
      return "v<<";

    case VEC_RSHIFT_EXPR:
      return "v>>";

    case PLUS_EXPR:
      return "+";

    case REDUC_PLUS_EXPR:
      return "r+";

    case WIDEN_SUM_EXPR:
      return "w+";

    case WIDEN_MULT_EXPR:
      return "w*";

    case NEGATE_EXPR:
    case MINUS_EXPR:
      return "-";

    case BIT_NOT_EXPR:
      return "~";

    case TRUTH_NOT_EXPR:
      return "!";

    case MULT_EXPR:
    case INDIRECT_REF:
      return "*";

    case ALIGN_INDIRECT_REF:
      return "A*";

    case MISALIGNED_INDIRECT_REF:
      return "M*";

    case TRUNC_DIV_EXPR:
    case RDIV_EXPR:
      return "/";

    case CEIL_DIV_EXPR:
      return "/[cl]";

    case FLOOR_DIV_EXPR:
      return "/[fl]";

    case ROUND_DIV_EXPR:
      return "/[rd]";

    case EXACT_DIV_EXPR:
      return "/[ex]";

    case TRUNC_MOD_EXPR:
      return "%";

    case CEIL_MOD_EXPR:
      return "%[cl]";

    case FLOOR_MOD_EXPR:
      return "%[fl]";

    case ROUND_MOD_EXPR:
      return "%[rd]";

    case PREDECREMENT_EXPR:
      return " --";

    case PREINCREMENT_EXPR:
      return " ++";

    case POSTDECREMENT_EXPR:
      return "-- ";

    case POSTINCREMENT_EXPR:
      return "++ ";

    case MAX_EXPR:
      return "max";

    case MIN_EXPR:
      return "min";

    default:
      return "<<< ??? >>>";
    }
}
/* Return the printable symbol for the operator of expression OP.
   Thin wrapper that dispatches on OP's tree code.  */
static const char *
op_symbol (tree op)
{
  return op_symbol_1 (TREE_CODE (op));
}
/* Prints the name of a CALL_EXPR to BUFFER, dispatching on the form of
   the callee expression (direct call, call through pointer, member
   function pointer, array of function pointers, ...).  */
static void
print_call_name (pretty_printer *buffer, tree node)
{
  tree op0;

  gcc_assert (TREE_CODE (node) == CALL_EXPR);

  op0 = TREE_OPERAND (node, 0);

  /* Look through a NON_LVALUE_EXPR wrapper.  */
  if (TREE_CODE (op0) == NON_LVALUE_EXPR)
    op0 = TREE_OPERAND (op0, 0);

  switch (TREE_CODE (op0))
    {
    case VAR_DECL:
    case PARM_DECL:
      dump_function_name (buffer, op0);
      break;

    case ADDR_EXPR:
    case INDIRECT_REF:
    case NOP_EXPR:
      /* Print the pointed-to function expression.  */
      dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false);
      break;

    case COND_EXPR:
      /* Callee chosen at run time: "(cond) ? f : g".  */
      pp_string (buffer, "(");
      dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false);
      pp_string (buffer, ") ? ");
      dump_generic_node (buffer, TREE_OPERAND (op0, 1), 0, 0, false);
      pp_string (buffer, " : ");
      dump_generic_node (buffer, TREE_OPERAND (op0, 2), 0, 0, false);
      break;

    case COMPONENT_REF:
      /* The function is a pointer contained in a structure.  */
      if (TREE_CODE (TREE_OPERAND (op0, 0)) == INDIRECT_REF ||
          TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL)
        dump_function_name (buffer, TREE_OPERAND (op0, 1));
      else
        dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false);
      /* else
	 We can have several levels of structures and a function
	 pointer inside.  This is not implemented yet...  */
      /*		  NIY;*/
      break;

    case ARRAY_REF:
      /* Call through an element of an array of function pointers.  */
      if (TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL)
        dump_function_name (buffer, TREE_OPERAND (op0, 0));
      else
        dump_generic_node (buffer, op0, 0, 0, false);
      break;

    case SSA_NAME:
    case OBJ_TYPE_REF:
      dump_generic_node (buffer, op0, 0, 0, false);
      break;

    default:
      NIY;
    }
}
/* Parses the string STR and replaces new-lines by '\n', tabs by '\t', ...
   i.e. re-escapes control characters in STR so the emitted string
   literal round-trips; other characters are copied verbatim.  */
static void
pretty_print_string (pretty_printer *buffer, const char *str)
{
  if (str == NULL)
    return;

  while (*str)
    {
      switch (str[0])
        {
        case '\b':
          pp_string (buffer, "\\b");
          break;

        case '\f':
          pp_string (buffer, "\\f");
          break;

        case '\n':
          pp_string (buffer, "\\n");
          break;

        case '\r':
          pp_string (buffer, "\\r");
          break;

        case '\t':
          pp_string (buffer, "\\t");
          break;

        case '\v':
          pp_string (buffer, "\\v");
          break;

        case '\\':
          pp_string (buffer, "\\\\");
          break;

        case '\"':
          pp_string (buffer, "\\\"");
          break;

        case '\'':
          pp_string (buffer, "\\'");
          break;

        /* No need to handle \0; the loop terminates on \0.  Codes 1-7
           are printed as single-digit octal escapes.  */

        case '\1':
          pp_string (buffer, "\\1");
          break;

        case '\2':
          pp_string (buffer, "\\2");
          break;

        case '\3':
          pp_string (buffer, "\\3");
          break;

        case '\4':
          pp_string (buffer, "\\4");
          break;

        case '\5':
          pp_string (buffer, "\\5");
          break;

        case '\6':
          pp_string (buffer, "\\6");
          break;

        case '\7':
          pp_string (buffer, "\\7");
          break;

        default:
          /* All other characters (including non-printables above \7)
             are emitted unchanged.  */
          pp_character (buffer, str[0]);
          break;
        }
      str++;
    }
}
/* One-time construction of the file-scope pretty-printer BUFFER, then
   (re)target its output stream at FILE.  Cheap to call before every
   dump; only the first call constructs the printer.  */
static void
maybe_init_pretty_print (FILE *file)
{
  if (!initialized)
    {
      pp_construct (&buffer, /* prefix */NULL, /* line-width */0);
      pp_needs_newline (&buffer) = true;
      initialized = 1;
    }

  buffer.buffer->stream = file;
}
/* Emit a newline into BUFFER and indent the new line by SPC spaces.  */
static void
newline_and_indent (pretty_printer *buffer, int spc)
{
  pp_newline (buffer);
  INDENT (spc);
}
/* Dump the virtual operands of statement STMT to BUFFER: one
   "# ... = V_MAY_DEF <...>;" line per may-def, one
   "# ... = V_MUST_DEF <...>;" line per must-def, and one
   "# VUSE <...>;" line per virtual use, indented by SPC and formatted
   according to FLAGS.  No-op when SSA operands are not active.  */
static void
dump_vops (pretty_printer *buffer, tree stmt, int spc, int flags)
{
  tree use;
  use_operand_p use_p;
  def_operand_p def_p;
  use_operand_p kill_p;
  ssa_op_iter iter;

  if (!ssa_operands_active ())
    return;

  FOR_EACH_SSA_MAYDEF_OPERAND (def_p, use_p, stmt, iter)
    {
      pp_string (buffer, "# ");
      dump_generic_node (buffer, DEF_FROM_PTR (def_p),
                         spc + 2, flags, false);
      pp_string (buffer, " = V_MAY_DEF <");
      dump_generic_node (buffer, USE_FROM_PTR (use_p),
                         spc + 2, flags, false);
      pp_string (buffer, ">;");
      newline_and_indent (buffer, spc);
    }

  FOR_EACH_SSA_MUSTDEF_OPERAND (def_p, kill_p, stmt, iter)
    {
      pp_string (buffer, "# ");
      dump_generic_node (buffer, DEF_FROM_PTR (def_p),
                         spc + 2, flags, false);
      pp_string (buffer, " = V_MUST_DEF <");
      dump_generic_node (buffer, USE_FROM_PTR (kill_p),
                         spc + 2, flags, false);
      pp_string (buffer, ">;");
      newline_and_indent (buffer, spc);
    }

  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_VUSE)
    {
      pp_string (buffer, "# VUSE <");
      dump_generic_node (buffer, use, spc + 2, flags, false);
      pp_string (buffer, ">;");
      newline_and_indent (buffer, spc);
    }
}
/* Dumps basic block BB to FILE with details described by FLAGS and
   indented by INDENT spaces.  Public entry point: initializes the
   shared pretty-printer, delegates to the buffered dumper, and flushes
   the result to FILE.  */
void
dump_generic_bb (FILE *file, basic_block bb, int indent, int flags)
{
  maybe_init_pretty_print (file);
  dump_generic_bb_buff (&buffer, bb, indent, flags);
  pp_flush (&buffer);
}
/* Dumps header of basic block BB to buffer BUFFER indented by INDENT
   spaces and details described by FLAGS.  With TDF_BLOCKS this prints
   the verbose "# BLOCK n freq: count: ... # PRED:" form; otherwise a
   bare "<bb n>:" label is printed only when the block has no leading
   LABEL_EXPR of its own.  */
static void
dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags)
{
  edge e;
  tree stmt;
  edge_iterator ei;

  if (flags & TDF_BLOCKS)
    {
      INDENT (indent);
      pp_string (buffer, "# BLOCK ");
      pp_decimal_int (buffer, bb->index);

      if (bb->frequency)
        {
          pp_string (buffer, " freq:");
          pp_decimal_int (buffer, bb->frequency);
        }
      if (bb->count)
        {
          pp_string (buffer, " count:");
          pp_widest_integer (buffer, bb->count);
        }

      if (flags & TDF_LINENO)
        {
          block_stmt_iterator bsi;

          /* Report the first statement that carries line information.  */
          for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
            if (get_lineno (bsi_stmt (bsi)) != -1)
              {
                pp_string (buffer, ", starting at line ");
                pp_decimal_int (buffer, get_lineno (bsi_stmt (bsi)));
                break;
              }
        }
      newline_and_indent (buffer, indent);

      pp_string (buffer, "# PRED:");
      /* Flush before dump_edge_info writes to the raw stream below, so
         output stays correctly interleaved.  */
      pp_write_text_to_stream (buffer);
      FOR_EACH_EDGE (e, ei, bb->preds)
        if (flags & TDF_SLIM)
          {
            pp_string (buffer, " ");
            if (e->src == ENTRY_BLOCK_PTR)
              pp_string (buffer, "ENTRY");
            else
              pp_decimal_int (buffer, e->src->index);
          }
        else
          dump_edge_info (buffer->buffer->stream, e, 0);
      pp_newline (buffer);
    }
  else
    {
      stmt = first_stmt (bb);
      if (!stmt || TREE_CODE (stmt) != LABEL_EXPR)
        {
          INDENT (indent - 2);
          pp_string (buffer, "<bb ");
          pp_decimal_int (buffer, bb->index);
          pp_string (buffer, ">:");
          pp_newline (buffer);
        }
    }
  pp_write_text_to_stream (buffer);
  check_bb_profile (bb, buffer->buffer->stream);
}
/* Dumps end of basic block BB to buffer BUFFER indented by INDENT
   spaces: the "# SUCC:" line listing successor edges, compact indices
   with TDF_SLIM or full edge info otherwise.  */
static void
dump_bb_end (pretty_printer *buffer, basic_block bb, int indent, int flags)
{
  edge e;
  edge_iterator ei;

  INDENT (indent);
  pp_string (buffer, "# SUCC:");
  /* Flush before dump_edge_info writes to the raw stream below.  */
  pp_write_text_to_stream (buffer);
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (flags & TDF_SLIM)
      {
        pp_string (buffer, " ");
        if (e->dest == EXIT_BLOCK_PTR)
          pp_string (buffer, "EXIT");
        else
          pp_decimal_int (buffer, e->dest->index);
      }
    else
      dump_edge_info (buffer->buffer->stream, e, 1);
  pp_newline (buffer);
}
/* Dumps phi nodes of basic block BB to buffer BUFFER with details described by
   FLAGS indented by INDENT spaces.  PHIs whose result is a GIMPLE
   register are always printed; virtual PHIs only when TDF_VOPS is
   set.  */
static void
dump_phi_nodes (pretty_printer *buffer, basic_block bb, int indent, int flags)
{
  tree phi = phi_nodes (bb);
  if (!phi)
    return;

  for (; phi; phi = PHI_CHAIN (phi))
    {
      if (is_gimple_reg (PHI_RESULT (phi)) || (flags & TDF_VOPS))
        {
          INDENT (indent);
          pp_string (buffer, "# ");
          dump_generic_node (buffer, phi, indent, flags, false);
          pp_newline (buffer);
        }
    }
}
/* Dump jump to basic block BB that is represented implicitly in the cfg
   to BUFFER, as "goto <bb N>;" — with the target's label name appended
   in parentheses when BB starts with a LABEL_EXPR.  */
static void
pp_cfg_jump (pretty_printer *buffer, basic_block bb)
{
  tree stmt;

  stmt = first_stmt (bb);
  pp_string (buffer, "goto <bb ");
  pp_decimal_int (buffer, bb->index);
  pp_string (buffer, ">");
  if (stmt && TREE_CODE (stmt) == LABEL_EXPR)
    {
      pp_string (buffer, " (");
      dump_generic_node (buffer, LABEL_EXPR_LABEL (stmt), 0, 0, false);
      pp_string (buffer, ")");
    }
  pp_semicolon (buffer);
}
/* Dump edges represented implicitly in basic block BB to BUFFER, indented
   by INDENT spaces, with details given by FLAGS.  A fallthru edge whose
   destination is not the next block in layout order is made explicit as
   a synthetic "goto", optionally prefixed with its source location when
   TDF_LINENO is requested.  */
static void
dump_implicit_edges (pretty_printer *buffer, basic_block bb, int indent,
                     int flags)
{
  edge e;
  edge_iterator ei;

  /* If there is a fallthru edge, we may need to add an artificial goto to the
     dump.  NOTE(review): when no fallthru edge exists, E is expected to
     be NULL after the loop (FOR_EACH_EDGE's exhaustion behavior) — the
     NULL check below relies on that; confirm against cfghooks.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (e->flags & EDGE_FALLTHRU)
      break;
  if (e && e->dest != bb->next_bb)
    {
      INDENT (indent);

      if ((flags & TDF_LINENO)
#ifdef USE_MAPPED_LOCATION
          && e->goto_locus != UNKNOWN_LOCATION
#else
          && e->goto_locus
#endif
          )
        {
          expanded_location goto_xloc;
#ifdef USE_MAPPED_LOCATION
          goto_xloc = expand_location (e->goto_locus);
#else
          goto_xloc = *e->goto_locus;
#endif
          pp_character (buffer, '[');
          if (goto_xloc.file)
            {
              pp_string (buffer, goto_xloc.file);
              pp_string (buffer, " : ");
            }
          pp_decimal_int (buffer, goto_xloc.line);
          pp_string (buffer, "] ");
        }

      pp_cfg_jump (buffer, e->dest);
      pp_newline (buffer);
    }
}
/* Dumps basic block BB to buffer BUFFER with details described by FLAGS and
   indented by INDENT spaces: header, PHI nodes, each statement, any
   implicit goto, and (with TDF_BLOCKS) the successor footer.  Labels
   are out-dented two spaces relative to the statements.  */
static void
dump_generic_bb_buff (pretty_printer *buffer, basic_block bb,
                      int indent, int flags)
{
  block_stmt_iterator bsi;
  tree stmt;
  int label_indent = indent - 2;

  if (label_indent < 0)
    label_indent = 0;

  dump_bb_header (buffer, bb, indent, flags);

  dump_phi_nodes (buffer, bb, indent, flags);

  for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
    {
      int curr_indent;

      stmt = bsi_stmt (bsi);

      curr_indent = TREE_CODE (stmt) == LABEL_EXPR ? label_indent : indent;

      INDENT (curr_indent);
      dump_generic_node (buffer, stmt, curr_indent, flags, true);
      pp_newline (buffer);
    }

  dump_implicit_edges (buffer, bb, indent, flags);

  if (flags & TDF_BLOCKS)
    dump_bb_end (buffer, bb, indent, flags);
}
|
AlloyImageProcessing.h | /*
* Copyright(C) 2015, Blake C. Lucas, Ph.D. (img.science@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef INCLUDE_ALLOYIMAGEPROCESSING_H_
#define INCLUDE_ALLOYIMAGEPROCESSING_H_
#include "AlloyImage.h"
namespace aly {
bool SANITY_CHECK_IMAGE_PROCESSING();
/// Fill an M x N array with Gaussian weights sampled at integer offsets
/// from the kernel center, then rescale every entry by the reciprocal
/// of the weight total so the kernel sums to exactly one.
template<class T, size_t M, size_t N> void GaussianKernel(T (&kernel)[M][N],
		T sigmaX = T(0.607902736 * (M - 1) * 0.5),
		T sigmaY = T(0.607902736 * (N - 1) * 0.5)) {
	T total = 0;
	for (int r = 0; r < (int) M; r++) {
		for (int c = 0; c < (int) N; c++) {
			// Normalized offsets from the kernel center.
			double xn = T(r - 0.5 * (M - 1)) / sigmaX;
			double yn = T(c - 0.5 * (N - 1)) / sigmaY;
			T w = T(std::exp(-0.5 * (xn * xn + yn * yn)));
			kernel[r][c] = w;
			total += w;
		}
	}
	// Normalize so the weights sum to one.
	T scale = T(1) / total;
	for (int r = 0; r < (int) M; r++) {
		for (int c = 0; c < (int) N; c++) {
			kernel[r][c] *= scale;
		}
	}
}
/// Build first-derivative-of-Gaussian kernels along X (row index) and
/// Y (column index).  Both outputs are scaled by the reciprocal of the
/// plain Gaussian weight sum, matching GaussianKernel's normalization,
/// so each derivative kernel sums to zero.
template<class T, size_t M, size_t N> void GaussianKernelDerivative(
		T (&gX)[M][N], T (&gY)[M][N], T sigmaX = T(0.607902736 * (M - 1) * 0.5),
		T sigmaY = T(0.607902736 * (N - 1) * 0.5)) {
	T total = 0;
	for (int r = 0; r < (int) M; r++) {
		for (int c = 0; c < (int) N; c++) {
			// Normalized offsets from the kernel center.
			double xn = T(r - 0.5 * (M - 1)) / sigmaX;
			double yn = T(c - 0.5 * (N - 1)) / sigmaY;
			T w = T(std::exp(-0.5 * (xn * xn + yn * yn)));
			gX[r][c] = w * xn / sigmaX;
			gY[r][c] = w * yn / sigmaY;
			total += w;
		}
	}
	// Normalize by the Gaussian weight total.
	T scale = T(1) / total;
	for (int r = 0; r < (int) M; r++) {
		for (int c = 0; c < (int) N; c++) {
			gX[r][c] *= scale;
			gY[r][c] *= scale;
		}
	}
}
/// Build a Laplacian-of-Gaussian kernel.  The raw LoG weights have
/// their mean subtracted so the kernel sums to zero, then are scaled by
/// the reciprocal of the Gaussian weight sum (the same normalization
/// factor GaussianKernel uses).
template<class T, size_t M, size_t N> void GaussianKernelLaplacian(
		T (&kernel)[M][N], T sigmaX = T(0.607902736 * (M - 1) * 0.5), T sigmaY =
				T(0.607902736 * (N - 1) * 0.5)) {
	T weightTotal = 0;
	T logTotal = 0;
	for (int r = 0; r < (int) M; r++) {
		for (int c = 0; c < (int) N; c++) {
			// Normalized offsets from the kernel center.
			double xn = T(r - 0.5 * (M - 1)) / sigmaX;
			double yn = T(c - 0.5 * (N - 1)) / sigmaY;
			T w = T(std::exp(-0.5 * (xn * xn + yn * yn)));
			weightTotal += w;
			// Second derivative of the Gaussian (LoG weight).
			T ww = w
					* (xn * xn / (sigmaX * sigmaX) + yn * yn / (sigmaY * sigmaY)
							- 1 / (sigmaX * sigmaX) - 1 / (sigmaY * sigmaY));
			logTotal += ww;
			kernel[r][c] = ww;
		}
	}
	T scale = T(1) / weightTotal;
	T mean = logTotal / T(M * N);
	// Subtract the mean (zero-sum kernel) and normalize.
	for (int r = 0; r < (int) M; r++) {
		for (int c = 0; c < (int) N; c++) {
			kernel[r][c] = (kernel[r][c] - mean) * scale;
		}
	}
}
/// Bundle of matched M x N Gaussian operators computed in one pass:
/// the smoothing filter, its X/Y first derivatives, and the Laplacian.
/// All four share the same normalization (reciprocal of the Gaussian
/// weight sum); the Laplacian additionally has its mean removed so it
/// sums to zero.
template<class T, size_t M, size_t N> struct GaussianOperators {
	T filter[M][N];
	T filterGradX[M][N];
	T filterGradY[M][N];
	T filterLaplacian[M][N];
	GaussianOperators(T sigmaX = T(0.607902736 * (M - 1) * 0.5),
			T sigmaY = T(0.607902736 * (N - 1) * 0.5)) {
		T weightTotal = 0;
		T logTotal = 0;
		for (int r = 0; r < (int) M; r++) {
			for (int c = 0; c < (int) N; c++) {
				// Normalized offsets from the kernel center.
				double xn = T(r - 0.5 * (M - 1)) / sigmaX;
				double yn = T(c - 0.5 * (N - 1)) / sigmaY;
				T w = T(std::exp(-0.5 * (xn * xn + yn * yn)));
				filter[r][c] = w;
				filterGradX[r][c] = w * xn / sigmaX;
				filterGradY[r][c] = w * yn / sigmaY;
				weightTotal += w;
				// Second derivative of the Gaussian (LoG weight).
				T ww = w
						* (xn * xn / (sigmaX * sigmaX) + yn * yn / (sigmaY * sigmaY)
								- 1 / (sigmaX * sigmaX) - 1 / (sigmaY * sigmaY));
				logTotal += ww;
				filterLaplacian[r][c] = ww;
			}
		}
		T scale = T(1) / weightTotal;
		T mean = logTotal / T(M * N);
		// Normalize all operators; center the Laplacian to zero sum.
		for (int r = 0; r < (int) M; r++) {
			for (int c = 0; c < (int) N; c++) {
				filterLaplacian[r][c] = (filterLaplacian[r][c] - mean) * scale;
				filter[r][c] *= scale;
				filterGradX[r][c] *= scale;
				filterGradY[r][c] *= scale;
			}
		}
	}
};
/// Convolve IMAGE with M x N derivative-of-Gaussian kernels, writing
/// the X and Y gradient responses into gX and gY (resized to match
/// IMAGE).  Accumulation is done per channel in double precision and
/// cast back to T on store.
template<size_t M, size_t N, class T, int C, ImageType I> void Gradient(
		const Image<T, C, I>& image, Image<T, C, I>& gX, Image<T, C, I>& gY, double sigmaX = (0.607902736 * (M - 1) * 0.5),
		double sigmaY = (0.607902736 * (N - 1) * 0.5)) {
	double filterX[M][N], filterY[M][N];
	GaussianKernelDerivative(filterX, filterY,sigmaX,sigmaY);
	gX.resize(image.width, image.height);
	gY.resize(image.width, image.height);
	// Output pixels are independent, so columns are processed in parallel.
#pragma omp parallel for
	for (int i = 0; i < image.width; i++) {
		for (int j = 0; j < image.height; j++) {
			vec<double, C> vsumX(0.0);
			vec<double, C> vsumY(0.0);
			for (int ii = 0; ii < (int)M; ii++) {
				for (int jj = 0; jj < (int)N; jj++) {
					// NOTE(review): i + ii - M/2 (and the j analogue) can fall
					// outside the image; assumes Image::operator() clamps or
					// wraps out-of-range coordinates — confirm in AlloyImage.h.
					vec<T, C> val = image(i + ii - (int)M / 2, j + jj - (int)N / 2);
					vsumX += filterX[ii][jj] * vec<double, C>(val);
					vsumY += filterY[ii][jj] * vec<double, C>(val);
				}
			}
			gX(i, j) = vec<T, C>(vsumX);
			gY(i, j) = vec<T, C>(vsumY);
		}
	}
}
/// Convolve IMAGE with an M x N Laplacian-of-Gaussian kernel, writing
/// the response into L (resized to match IMAGE).  Accumulation is done
/// per channel in double precision and cast back to T on store.
template<size_t M, size_t N, class T, int C, ImageType I> void Laplacian(
		const Image<T, C, I>& image, Image<T, C, I>& L, double sigmaX = (0.607902736 * (M - 1) * 0.5),
		double sigmaY = (0.607902736 * (N - 1) * 0.5)) {
	double filter[M][N];
	GaussianKernelLaplacian(filter,sigmaX,sigmaY);
	L.resize(image.width, image.height);
	// Output pixels are independent, so columns are processed in parallel.
#pragma omp parallel for
	for (int i = 0; i < image.width; i++) {
		for (int j = 0; j < image.height; j++) {
			vec<double, C> vsum(0.0);
			for (int ii = 0; ii < (int)M; ii++) {
				for (int jj = 0; jj < (int)N; jj++) {
					// NOTE(review): assumes Image::operator() handles
					// out-of-range coordinates — confirm in AlloyImage.h.
					vec<T, C> val = image(i + ii - (int)M / 2, j + jj - (int)N / 2);
					vsum += filter[ii][jj] * vec<double, C>(val);
				}
			}
			L(i, j) = vec<T, C>(vsum);
		}
	}
}
/// Convolve IMAGE with an M x N normalized Gaussian kernel, writing the
/// smoothed result into B (resized to match IMAGE).  Accumulation is
/// done per channel in double precision and cast back to T on store.
template<size_t M, size_t N, class T, int C, ImageType I> void Smooth(
		const Image<T, C, I>& image, Image<T, C, I>& B, double sigmaX = (0.607902736 * (M - 1) * 0.5),
		double sigmaY = (0.607902736 * (N - 1) * 0.5)) {
	double filter[M][N];
	GaussianKernel(filter,sigmaX,sigmaY);
	B.resize(image.width, image.height);
	// Output pixels are independent, so columns are processed in parallel.
#pragma omp parallel for
	for (int i = 0; i < image.width; i++) {
		for (int j = 0; j < image.height; j++) {
			vec<double, C> vsum(0.0);
			for (int ii = 0; ii < (int)M; ii++) {
				for (int jj = 0; jj < (int)N; jj++) {
					// NOTE(review): assumes Image::operator() handles
					// out-of-range coordinates — confirm in AlloyImage.h.
					vec<T, C> val = image(i + ii - (int)M / 2, j + jj - (int)N / 2);
					vsum += filter[ii][jj] * vec<double, C>(val);
				}
			}
			B(i, j) = vec<T, C>(vsum);
		}
	}
}
// Convenience wrappers fixing the kernel footprint at the common sizes
// (3x3, 5x5, 7x7, 11x11); each forwards to the generic Smooth /
// Laplacian / Gradient implementation above with default sigmas.
template<class T, int C, ImageType I> void Smooth3x3(
		const Image<T, C, I>& image, Image<T, C, I>& B) {
	Smooth<3, 3>(image, B);
}
template<class T, int C, ImageType I> void Smooth5x5(
		const Image<T, C, I>& image, Image<T, C, I>& B) {
	Smooth<5, 5>(image, B);
}
template<class T, int C, ImageType I> void Smooth7x7(
		const Image<T, C, I>& image, Image<T, C, I>& B) {
	Smooth<7, 7>(image, B);
}
template<class T, int C, ImageType I> void Smooth11x11(
		const Image<T, C, I>& image, Image<T, C, I>& B) {
	Smooth<11, 11>(image, B);
}
template<class T, int C, ImageType I> void Laplacian3x3(
		const Image<T, C, I>& image, Image<T, C, I>& L) {
	Laplacian<3, 3>(image, L);
}
template<class T, int C, ImageType I> void Laplacian5x5(
		const Image<T, C, I>& image, Image<T, C, I>& L) {
	Laplacian<5, 5>(image, L);
}
template<class T, int C, ImageType I> void Laplacian7x7(
		const Image<T, C, I>& image, Image<T, C, I>& L) {
	Laplacian<7, 7>(image, L);
}
template<class T, int C, ImageType I> void Laplacian11x11(
		const Image<T, C, I>& image, Image<T, C, I>& L) {
	Laplacian<11, 11>(image, L);
}
template<class T, int C, ImageType I> void Gradient3x3(
		const Image<T, C, I>& image, Image<T, C, I>& gX, Image<T, C, I>& gY) {
	Gradient<3, 3>(image, gX, gY);
}
template<class T, int C, ImageType I> void Gradient5x5(
		const Image<T, C, I>& image, Image<T, C, I>& gX, Image<T, C, I>& gY) {
	Gradient<5, 5>(image, gX, gY);
}
template<class T, int C, ImageType I> void Gradient7x7(
		const Image<T, C, I>& image, Image<T, C, I>& gX, Image<T, C, I>& gY) {
	Gradient<7, 7>(image, gX, gY);
}
template<class T, int C, ImageType I> void Gradient11x11(
		const Image<T, C, I>& image, Image<T, C, I>& gX, Image<T, C, I>& gY) {
	Gradient<11, 11>(image, gX, gY);
}
}
#endif /* INCLUDE_ALLOYIMAGEPROCESSING_H_ */
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.