source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
hist.c | /*
* hist.c: Example of histogram calculation in OpenMP.
*
* (C) 2015 Mikhail Kurnosov <mkurnosov@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <limits.h>
#include <omp.h>
const uint64_t width = 32 * 1024;
const uint64_t height = 32 * 1024;
/*
 * xmalloc: malloc() that terminates the program on allocation failure,
 * so callers never have to handle a NULL return.
 */
void *xmalloc(size_t size)
{
    void *p = malloc(size);
    if (p == NULL) {
        /* Fixed diagnostic grammar ("No enough" -> "Not enough"). */
        fprintf(stderr, "Not enough memory\n");
        exit(EXIT_FAILURE);
    }
    return p;
}
/*
 * hist_serial: contrast-stretch an 8-bit grayscale image in place.
 * Builds a 256-bin histogram, finds the darkest/brightest levels that
 * actually occur, and rescales every pixel by 255 / (maxi - mini).
 */
void hist_serial(uint8_t *pixels, int height, int width)
{
    uint64_t npixels = (uint64_t)height * width;
    /* h[v] = number of pixels with gray level v (v in 0..255).
       calloc zero-initializes, removing the explicit clearing loop. */
    int *h = calloc(256, sizeof(*h));
    if (h == NULL) {
        fprintf(stderr, "Not enough memory\n");
        exit(EXIT_FAILURE);
    }
    for (uint64_t i = 0; i < npixels; i++)
        h[pixels[i]]++;
    /* Locate the darkest (mini) and brightest (maxi) occupied bins. */
    int mini, maxi;
    for (mini = 0; mini < 256 && h[mini] == 0; mini++);
    for (maxi = 255; maxi >= 0 && h[maxi] == 0; maxi--);
    /* Stretch factor; guard against a flat (single-level) or empty
       image, where maxi - mini would be zero or negative (the original
       code divided by zero in that case). */
    int q = (maxi > mini) ? 255 / (maxi - mini) : 1;
    for (uint64_t i = 0; i < npixels; i++)
        pixels[i] = (pixels[i] - mini) * q;
    free(h);
}
/*
 * hist_omp: OpenMP version of hist_serial. Each thread accumulates a
 * private histogram, the partial histograms are merged under a critical
 * section, and the pixel rescaling loop is work-shared.
 */
void hist_omp(uint8_t *pixels, int height, int width)
{
    uint64_t npixels = (uint64_t)height * width;
    /* Global histogram: occurrences of each of the 256 gray levels. */
    int *h = calloc(256, sizeof(*h));
    if (h == NULL) {
        fprintf(stderr, "Not enough memory\n");
        exit(EXIT_FAILURE);
    }
    #pragma omp parallel
    {
        /* Per-thread local histogram avoids contention on h[]. */
        int *hloc = calloc(256, sizeof(*hloc));
        if (hloc == NULL) {
            fprintf(stderr, "Not enough memory\n");
            exit(EXIT_FAILURE);
        }
        /* Signed 64-bit index: OpenMP loop variables must be signed and
           the original "int i" could not cover a uint64_t pixel count. */
        #pragma omp for nowait
        for (int64_t i = 0; i < (int64_t)npixels; i++)
            hloc[pixels[i]]++;
        /* Merge this thread's partial histogram into the shared one. */
        #pragma omp critical
        {
            for (int i = 0; i < 256; i++)
                h[i] += hloc[i];
        }
        free(hloc);
        /* All merges must finish before any thread reads h[] below. */
        #pragma omp barrier
        int mini, maxi;
        for (mini = 0; mini < 256 && h[mini] == 0; mini++);
        for (maxi = 255; maxi >= 0 && h[maxi] == 0; maxi--);
        /* Guard against division by zero on a flat or empty image. */
        int q = (maxi > mini) ? 255 / (maxi - mini) : 1;
        #pragma omp for
        for (int64_t i = 0; i < (int64_t)npixels; i++)
            pixels[i] = (pixels[i] - mini) * q;
    }
    free(h);
}
/*
 * hist_omp2: OpenMP variant that reduces only the global min/max gray
 * level instead of merging full histograms. Fixes a data race present
 * in the original: threads could read mini/maxi before every thread had
 * contributed its local extrema (there was no barrier after the
 * critical section). Also drops the global histogram h[], which was
 * allocated and zeroed but never used.
 */
void hist_omp2(uint8_t *pixels, int height, int width)
{
    uint64_t npixels = (uint64_t)height * width;
    /* Shared extrema, reduced across threads under the critical below. */
    int mini = 256, maxi = -1;
    #pragma omp parallel
    {
        /* Per-thread histogram, used only to find local extrema. */
        int *hloc = calloc(256, sizeof(*hloc));
        if (hloc == NULL) {
            fprintf(stderr, "Not enough memory\n");
            exit(EXIT_FAILURE);
        }
        #pragma omp for nowait
        for (int64_t i = 0; i < (int64_t)npixels; i++)
            hloc[pixels[i]]++;
        int mini_loc, maxi_loc;
        for (mini_loc = 0; mini_loc < 256 && hloc[mini_loc] == 0; mini_loc++);
        for (maxi_loc = 255; maxi_loc >= 0 && hloc[maxi_loc] == 0; maxi_loc--);
        free(hloc);
        #pragma omp critical
        {
            if (mini > mini_loc)
                mini = mini_loc;
            if (maxi < maxi_loc)
                maxi = maxi_loc;
        }
        /* BUG FIX: wait until every thread has merged its local extrema
           before any thread reads mini/maxi to compute q. */
        #pragma omp barrier
        /* Guard against division by zero on a flat or empty image. */
        int q = (maxi > mini) ? 255 / (maxi - mini) : 1;
        #pragma omp for
        for (int64_t i = 0; i < (int64_t)npixels; i++)
            pixels[i] = (pixels[i] - mini) * q;
    }
}
/*
 * Driver: runs the serial and parallel histogram stretch on identical
 * pseudo-random images, reports timings, and verifies both outputs match.
 */
int main(int argc, char *argv[])
{
    /* width/height are uint64_t: printing them with %d is undefined
       behavior, so use PRIu64 from <inttypes.h>. */
    printf("Histogram (image %" PRIu64 "x%" PRIu64 " ~ %" PRIu64 " MiB)\n",
           height, width, height * width / (1 << 20));
    uint64_t npixels = width * height;
    uint8_t *pixels1, *pixels2;
    /* Run serial version. */
    pixels1 = xmalloc(sizeof(*pixels1) * npixels);
    srand(0);
    for (uint64_t i = 0; i < npixels; i++)
        pixels1[i] = rand() % 256;
    double tser = omp_get_wtime();
    hist_serial(pixels1, (int)height, (int)width);
    tser = omp_get_wtime() - tser;
    printf("Elapsed time (serial): %.6f\n", tser);
    /* Run parallel version on an identically seeded random image. */
    pixels2 = xmalloc(sizeof(*pixels2) * npixels);
    srand(0);
    for (uint64_t i = 0; i < npixels; i++)
        pixels2[i] = rand() % 256;
    double tpar = omp_get_wtime();
    hist_omp(pixels2, (int)height, (int)width);
    tpar = omp_get_wtime() - tpar;
    printf("Elapsed time (parallel): %.6f\n", tpar);
    printf("Speedup: %.2f\n", tser / tpar);
    /* Verify both versions produced identical output. */
    for (uint64_t i = 0; i < npixels; i++) {
        if (pixels1[i] != pixels2[i]) {
            printf("Verification failed: %" PRIu64 " %d %d\n", i,
                   pixels1[i], pixels2[i]);
            break;
        }
    }
    free(pixels1);
    free(pixels2);
    return 0;
}
|
GB_unaryop__abs_int8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int8_fp64
// op(A') function: GB_tran__abs_int8_fp64
// C type: int8_t
// A type: double
// cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, aij) \
int8_t z ; GB_CAST_SIGNED(z,aij,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for all p in 0..anz-1. Per the header of
// this generated file, GB_CAST_OP expands to: cast the double aij to
// int8_t via GB_CAST_SIGNED, then take GB_IABS of the result.
GrB_Info GB_unop__abs_int8_fp64
(
    int8_t *Cx,             // output array; Cx and Ax may be aliased
    double *Ax,             // input array
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // embarrassingly parallel: each entry is independent, so a static
    // schedule over nthreads threads is sufficient
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The entire kernel body is supplied by the GB_unaryop_transpose.c
// template, specialized by the GB_* macros defined earlier in this file.
GrB_Info GB_tran__abs_int8_fp64
(
    GrB_Matrix C,                        // output matrix
    const GrB_Matrix A,                  // input matrix (transposed on the fly)
    int64_t *GB_RESTRICT *Rowcounts,     // NOTE(review): presumably per-slice
                                         // row counts from phase 1 — see the
                                         // template for exact semantics
    GBI_single_iterator Iter,            // iterator over A
    const int64_t *GB_RESTRICT A_slice,  // partition of A into naslice pieces
    int naslice                          // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the two-phase transpose template:
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
serial_tree_learner.h | #ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/feature.h>
#include "feature_histogram.hpp"
#include "data_partition.hpp"
#include "split_info.hpp"
#include "leaf_splits.hpp"
#include <cstdio>
#include <vector>
#include <random>
#include <cmath>
#include <memory>
namespace LightGBM {
/*!
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
 public:
  explicit SerialTreeLearner(const TreeConfig* tree_config);

  ~SerialTreeLearner();

  /*! \brief Bind the learner to a training dataset */
  void Init(const Dataset* train_data) override;

  /*! \brief Apply a new tree configuration without re-binding the data */
  void ResetConfig(const TreeConfig* tree_config) override;

  /*! \brief Learn one tree from the given per-datum gradients/hessians */
  Tree* Train(const score_t* gradients, const score_t *hessians) override;

  /*! \brief Restrict training to a bagged subset of row indices */
  void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
    data_partition_->SetUsedDataIndices(used_indices, num_data);
  }

  /*!
  * \brief Add the constant output of each leaf of the last trained tree
  *        to the score of every data point the partition assigned to
  *        that leaf. Parallel over leaves; guided schedule because leaf
  *        sizes are uneven.
  */
  void AddPredictionToScore(double* out_score) const override {
    #pragma omp parallel for schedule(guided)
    for (int i = 0; i < data_partition_->num_leaves(); ++i) {
      double output = static_cast<double>(last_trained_tree_->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

 protected:
  /*!
  * \brief Some initial works before training
  */
  virtual void BeforeTrain();

  /*!
  * \brief Some initial works before FindBestSplit
  */
  virtual bool BeforeFindBestSplit(int left_leaf, int right_leaf);

  /*!
  * \brief Find best thresholds for all features, using multi-threading.
  * The result will be stored in smaller_leaf_splits_ and larger_leaf_splits_.
  * This function will be called in FindBestSplit.
  */
  virtual void FindBestThresholds();

  /*!
  * \brief Find best features for leaves from smaller_leaf_splits_ and larger_leaf_splits_.
  * This function will be called after FindBestThresholds.
  */
  inline virtual void FindBestSplitsForLeaves();

  /*!
  * \brief Partition tree and data according best split.
  * \param tree Current tree, will be splitted on this function.
  * \param best_leaf The index of leaf that will be splitted.
  * \param left_leaf The index of left leaf after splitted.
  * \param right_leaf The index of right leaf after splitted.
  */
  virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);

  /*!
  * \brief Get the number of data in a leaf
  * \param leaf_idx The index of leaf
  * \return The number of data in the leaf_idx leaf
  */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*!
  * \brief Find best features for leaf from leaf_splits
  * \param leaf_splits
  */
  inline void FindBestSplitForLeaf(LeafSplits* leaf_splits);

  /*! \brief Last trained decision tree */
  const Tree* last_trained_tree_;
  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief used for generate used features */
  Random random_;
  /*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */
  std::vector<bool> is_feature_used_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_hessians_;
  /*! \brief Pointer to ordered_gradients_, use this to avoid copy at BeforeTrain */
  const score_t* ptr_to_ordered_gradients_smaller_leaf_;
  /*! \brief Pointer to ordered_hessians_, use this to avoid copy at BeforeTrain */
  const score_t* ptr_to_ordered_hessians_smaller_leaf_;
  /*! \brief Pointer to ordered_gradients_, use this to avoid copy at BeforeTrain */
  const score_t* ptr_to_ordered_gradients_larger_leaf_;
  /*! \brief Pointer to ordered_hessians_, use this to avoid copy at BeforeTrain */
  const score_t* ptr_to_ordered_hessians_larger_leaf_;
  /*! \brief Store ordered bin */
  std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
  /*! \brief True if has ordered bin */
  bool has_ordered_bin_ = false;
  /*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
  std::vector<char> is_data_in_leaf_;
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const TreeConfig* tree_config_;
};
// Evaluate the cached per-feature split candidates of both current
// leaves and record the overall best split for each one in
// best_split_per_leaf_ (via FindBestSplitForLeaf).
inline void SerialTreeLearner::FindBestSplitsForLeaves() {
  FindBestSplitForLeaf(smaller_leaf_splits_.get());
  FindBestSplitForLeaf(larger_leaf_splits_.get());
}
// Number of training rows currently assigned to the given leaf; a
// negative index denotes "no leaf" and reports zero.
// (Parameter renamed to leaf_idx to match the in-class declaration.)
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  if (leaf_idx < 0) {
    return 0;
  }
  return data_partition_->leaf_count(leaf_idx);
}
// Pick, among all per-feature split candidates of this leaf, the one
// with the highest gain, and record it in best_split_per_leaf_.
// No-op for a null or invalid (negative-index) leaf.
inline void SerialTreeLearner::FindBestSplitForLeaf(LeafSplits* leaf_splits) {
  if (leaf_splits == nullptr || leaf_splits->LeafIndex() < 0) {
    return;
  }
  // Hoisted: the original re-evaluated BestSplitPerFeature() on every
  // loop iteration.
  const std::vector<SplitInfo>& splits = leaf_splits->BestSplitPerFeature();
  std::vector<double> gains;
  gains.reserve(splits.size());
  for (size_t i = 0; i < splits.size(); ++i) {
    gains.push_back(splits[i].gain);
  }
  int best_feature = static_cast<int>(ArrayArgs<double>::ArgMax(gains));
  int leaf = leaf_splits->LeafIndex();
  best_split_per_leaf_[leaf] = splits[best_feature];
  best_split_per_leaf_[leaf].feature = best_feature;
}
} // namespace LightGBM
#endif  // LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
|
schedule.c | #include<stdio.h>
#include<math.h>
#include<omp.h>
#include<stdlib.h>
#include<sys/time.h>
#include<unistd.h>
#define ARRAY_SIZE 1024768
/*
 * Benchmark: time a statically scheduled parallel vector addition of
 * length n (taken from argv[1]).
 */
int main(int argc, char *argv[]) {
  int i;
  struct timeval tstart, tend;

  /* The original dereferenced argv[1] unconditionally. */
  if (argc < 2) {
    fprintf(stderr, "Usage: %s <n>\n", argv[0]);
    return 1;
  }
  int n = atoi(argv[1]);
  if (n <= 0) {
    fprintf(stderr, "n must be a positive integer\n");
    return 1;
  }
  int *a = (int *) malloc(sizeof(int) * n);
  int *b = (int *) malloc(sizeof(int) * n);
  int *c = (int *) malloc(sizeof(int) * n);
  if (a == NULL || b == NULL || c == NULL) {
    fprintf(stderr, "Out of memory\n");
    free(a);
    free(b);
    free(c);
    return 1;
  }
  /* Initialize the inputs: the original read a[] and b[] while they
     were still uninitialized, which is undefined behavior. */
  for (i = 0; i < n; ++i) {
    a[i] = i;
    b[i] = n - i;
  }
  gettimeofday(&tstart, NULL);
  #pragma omp parallel for schedule(static)
  for (i = 0; i < n; ++i) {
    c[i] = a[i] + b[i];
  }
  gettimeofday(&tend, NULL);
  /* tv_sec/tv_usec arithmetic yields a long: print with %ld, not %d. */
  long elapsed_us = (tend.tv_sec - tstart.tv_sec) * 1000000L
      + (tend.tv_usec - tstart.tv_usec);
  printf("Time taken is:%ld\n", elapsed_us);
  free(a);
  free(b);
  free(c);
  return 0;
}
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/enhance.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/gem.h"
#include "magick/gem-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/utility.h"
/*
Typedef declarations.
*/
typedef struct _TransformPacket
{
MagickRealType
x,
y,
z;
} TransformPacket;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o l o r s p a c e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageColorspaceType() returns the potential colorspace of image:
% sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
% To ensure the image type matches its potential, use SetImageColorspaceType():
%
% (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
% exception);
%
% The format of the GetImageColorspaceType method is:
%
% ColorspaceType GetImageColorspaceType(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Report the potential colorspace of the image: if its content is
    effectively grayscale, report GRAY; otherwise report the image's
    current colorspace.
  */
  ColorspaceType
    colorspace;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colorspace=image->colorspace;
  switch (IdentifyImageType(image,exception))
  {
    case BilevelType:
    case GrayscaleType:
    case GrayscaleMatteType:
    {
      colorspace=GRAYColorspace;
      break;
    }
    default:
      break;
  }
  return(colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalized the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the RGBTransformImage method is:
%
% MagickBooleanType RGBTransformImage(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
*/
static inline void ConvertRGBToCMY(const Quantum red,const Quantum green,
  const Quantum blue,double *cyan,double *magenta,double *yellow)
{
  /*
    CMY is the subtractive complement of RGB, normalized to [0,1].
  */
  double c = QuantumScale*(QuantumRange-red);
  double m = QuantumScale*(QuantumRange-green);
  double y = QuantumScale*(QuantumRange-blue);

  *cyan=c;
  *magenta=m;
  *yellow=y;
}
static void ConvertRGBToLab(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *a,double *b)
{
  /*
    Convert through the XYZ intermediate space: RGB -> XYZ -> CIE Lab.
  */
  double X = 0.0, Y = 0.0, Z = 0.0;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLab(X,Y,Z,L,a,b);
}
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  /*
    Apply the fixed 3x3 XYZ-to-LMS matrix row by row.
  */
  const double l = 0.7328*x+0.4296*y-0.1624*z;
  const double m = (-0.7036*x+1.6975*y+0.0061*z);
  const double s = 0.0030*x+0.0136*y+0.9834*z;

  *L=l;
  *M=m;
  *S=s;
}
static void ConvertRGBToLMS(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *M,double *S)
{
  /*
    Convert through the XYZ intermediate space: RGB -> XYZ -> LMS.
  */
  double X = 0.0, Y = 0.0, Z = 0.0;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLMS(X,Y,Z,L,M,S);
}
static void ConvertRGBToLuv(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *u,double *v)
{
  /*
    Convert through the XYZ intermediate space: RGB -> XYZ -> CIE Luv.
  */
  double X = 0.0, Y = 0.0, Z = 0.0;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,L,u,v);
}
static void ConvertRGBToxyY(const Quantum red,const Quantum green,
  const Quantum blue,double *low_x,double *low_y,double *cap_Y)
{
  /*
    Project XYZ onto the chromaticity plane (x=X/(X+Y+Z), y=Y/(X+Y+Z)),
    keeping Y as the luminance channel.
  */
  double X = 0.0, Y = 0.0, Z = 0.0;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  double norm = PerceptibleReciprocal(X+Y+Z);
  *low_x=norm*X;
  *low_y=norm*Y;
  *cap_Y=Y;
}
static void ConvertRGBToYPbPr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Pb,double *Pr)
{
  /*
    BT.601 luma/chroma; Pb and Pr carry a 0.5 offset so the nominal
    [-0.5,0.5] chroma range maps into [0,1].
  */
  double luma = QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  double pb = QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
  double pr = QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;

  *Y=luma;
  *Pb=pb;
  *Pr=pr;
}
static void ConvertRGBToYCbCr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Cb,double *Cr)
{
  /*
    YCbCr uses the same coefficients as YPbPr here, so delegate
    directly to ConvertRGBToYPbPr.
  */
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}
static void ConvertRGBToYUV(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *U,double *V)
{
  /*
    YUV luma/chroma; U and V carry a 0.5 offset so the nominal
    [-0.5,0.5] chroma range maps into [0,1].
  */
  double luma = QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  double u = QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
  double v = QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;

  *Y=luma;
  *U=u;
  *V=v;
}
static void ConvertRGBToYDbDr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Db,double *Dr)
{
  /*
    YDbDr luma/chroma; Db and Dr carry a 0.5 offset so the nominal
    signed chroma range maps into [0,1].
  */
  double luma = QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  double db = QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
  double dr = QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;

  *Y=luma;
  *Db=db;
  *Dr=dr;
}
static void ConvertRGBToYIQ(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *I,double *Q)
{
  /*
    NTSC YIQ luma/chroma; I and Q carry a 0.5 offset so the nominal
    signed chroma range maps into [0,1].
  */
  double luma = QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  double in_phase = QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
  double quadrature = QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;

  *Y=luma;
  *I=in_phase;
  *Q=quadrature;
}
MagickExport MagickBooleanType RGBTransformImage(Image *image,
const ColorspaceType colorspace)
{
#define RGBTransformImageTag "RGBTransform/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
primary_info;
register ssize_t
i;
ssize_t
y;
TransformPacket
*x_map,
*y_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(colorspace != sRGBColorspace);
assert(colorspace != TransparentColorspace);
assert(colorspace != UndefinedColorspace);
status=MagickTrue;
progress=0;
exception=(&image->exception);
switch (colorspace)
{
case CMYKColorspace:
{
MagickPixelPacket
zero;
/*
Convert RGB to CMYK colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
pixel.red=(MagickRealType) pixel.red;
pixel.green=(MagickRealType) pixel.green;
pixel.blue=(MagickRealType) pixel.blue;
ConvertRGBToCMYK(&pixel);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->type=image->matte == MagickFalse ? ColorSeparationType :
ColorSeparationMatteType;
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
case GRAYColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGray(q,ClampToQuantum(GetPixelIntensity(image,q)));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case CMYColorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
/*
Transform image from sRGB to HSI.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
X,
Y,
Z;
Quantum
blue,
green,
red;
red=ClampToQuantum((MagickRealType) GetPixelRed(q));
green=ClampToQuantum((MagickRealType) GetPixelGreen(q));
blue=ClampToQuantum((MagickRealType) GetPixelBlue(q));
switch (colorspace)
{
case CMYColorspace:
{
ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
break;
}
case HCLColorspace:
{
ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
break;
}
case HCLpColorspace:
{
ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
break;
}
case HSBColorspace:
{
ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
break;
}
case HSIColorspace:
{
ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
break;
}
case HSLColorspace:
{
ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
break;
}
case HSVColorspace:
{
ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
break;
}
case HWBColorspace:
{
ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
break;
}
case LabColorspace:
{
ConvertRGBToLab(red,green,blue,&X,&Y,&Z);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z);
break;
}
case LCHuvColorspace:
{
ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z);
break;
}
case LMSColorspace:
{
ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
break;
}
case LuvColorspace:
{
ConvertRGBToLuv(red,green,blue,&X,&Y,&Z);
break;
}
case xyYColorspace:
{
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
break;
}
case XYZColorspace:
{
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
break;
}
case YCbCrColorspace:
{
ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
break;
}
case YDbDrColorspace:
{
ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
break;
}
case YIQColorspace:
{
ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
break;
}
case YPbPrColorspace:
{
ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
break;
}
case YUVColorspace:
{
ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
break;
}
default:
{
X=QuantumScale*red;
Y=QuantumScale*green;
Z=QuantumScale*blue;
break;
}
}
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X));
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y));
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
#define DisplayGamma (1.0/1.7)
#define FilmGamma 0.6
#define ReferenceBlack 95.0
#define ReferenceWhite 685.0
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform RGB to Log colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma");
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma");
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black");
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white");
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
film_gamma);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+
log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/
film_gamma))/1024.0));
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
SetPixelRed(q,logmap[ScaleQuantumToMap(red)]);
SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]);
SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform image from sRGB to linear RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(&primary_info,0,sizeof(primary_info));
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
I and Q, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.33333*(double) i);
x_map[i].y=(MagickRealType) (0.50000*(double) i);
x_map[i].z=(MagickRealType) (-0.25000*(double) i);
y_map[i].x=(MagickRealType) (0.33334*(double) i);
y_map[i].y=(MagickRealType) (0.00000*(double) i);
y_map[i].z=(MagickRealType) (0.50000*(double) i);
z_map[i].x=(MagickRealType) (0.33333*(double) i);
z_map[i].y=(MagickRealType) (-0.50000*(double) i);
z_map[i].z=(MagickRealType) (-0.25000*(double) i);
}
break;
}
case Rec601LumaColorspace:
{
/*
Initialize Rec601 luma tables:
G = 0.298839*R+0.586811*G+0.114350*B
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839*(double) i);
x_map[i].y=(MagickRealType) (0.298839*(double) i);
x_map[i].z=(MagickRealType) (0.298839*(double) i);
y_map[i].x=(MagickRealType) (0.586811*(double) i);
y_map[i].y=(MagickRealType) (0.586811*(double) i);
y_map[i].z=(MagickRealType) (0.586811*(double) i);
z_map[i].x=(MagickRealType) (0.114350*(double) i);
z_map[i].y=(MagickRealType) (0.114350*(double) i);
z_map[i].z=(MagickRealType) (0.114350*(double) i);
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.601):
Y = 0.2988390*R+0.5868110*G+0.1143500*B
Cb= -0.1687367*R-0.3312640*G+0.5000000*B
Cr= 0.5000000*R-0.4186880*G-0.0813120*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839*(double) i);
x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].x=(MagickRealType) (0.586811*(double) i);
y_map[i].y=(MagickRealType) (-0.331264*(double) i);
y_map[i].z=(MagickRealType) (-0.418688*(double) i);
z_map[i].x=(MagickRealType) (0.114350*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
z_map[i].z=(MagickRealType) (-0.081312*(double) i);
}
break;
}
case Rec709LumaColorspace:
{
/*
Initialize Rec709 luma tables:
G = 0.212656*R+0.715158*G+0.072186*B
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.212656*(double) i);
x_map[i].y=(MagickRealType) (0.212656*(double) i);
x_map[i].z=(MagickRealType) (0.212656*(double) i);
y_map[i].x=(MagickRealType) (0.715158*(double) i);
y_map[i].y=(MagickRealType) (0.715158*(double) i);
y_map[i].z=(MagickRealType) (0.715158*(double) i);
z_map[i].x=(MagickRealType) (0.072186*(double) i);
z_map[i].y=(MagickRealType) (0.072186*(double) i);
z_map[i].z=(MagickRealType) (0.072186*(double) i);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.709):
Y = 0.212656*R+0.715158*G+0.072186*B
Cb= -0.114572*R-0.385428*G+0.500000*B
Cr= 0.500000*R-0.454153*G-0.045847*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.212656*(double) i);
x_map[i].y=(MagickRealType) (-0.114572*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].x=(MagickRealType) (0.715158*(double) i);
y_map[i].y=(MagickRealType) (-0.385428*(double) i);
y_map[i].z=(MagickRealType) (-0.454153*(double) i);
z_map[i].x=(MagickRealType) (0.072186*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
z_map[i].z=(MagickRealType) (-0.045847*(double) i);
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
Y = 0.298839*R+0.586811*G+0.114350*B
C1= -0.298839*R-0.586811*G+0.88600*B
C2= 0.70100*R-0.586811*G-0.114350*B
YCC is scaled by 1.3584.  C1 zero is at 156 and C2 zero is at 137.
*/
primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
{
x_map[i].x=0.005382*i;
x_map[i].y=(-0.003296)*i;
x_map[i].z=0.009410*i;
y_map[i].x=0.010566*i;
y_map[i].y=(-0.006471)*i;
y_map[i].z=(-0.007880)*i;
z_map[i].x=0.002052*i;
z_map[i].y=0.009768*i;
z_map[i].z=(-0.001530)*i;
}
for ( ; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.298839*(1.099*i-0.099);
x_map[i].y=(-0.298839)*(1.099*i-0.099);
x_map[i].z=0.70100*(1.099*i-0.099);
y_map[i].x=0.586811*(1.099*i-0.099);
y_map[i].y=(-0.586811)*(1.099*i-0.099);
y_map[i].z=(-0.586811)*(1.099*i-0.099);
z_map[i].x=0.114350*(1.099*i-0.099);
z_map[i].y=0.88600*(1.099*i-0.099);
z_map[i].z=(-0.114350)*(1.099*i-0.099);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert from sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
register size_t
blue,
green,
red;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelRed(q)));
green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelGreen(q)));
blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelBlue(q)));
pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
(MagickRealType) primary_info.x;
pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
(MagickRealType) primary_info.y;
pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
(MagickRealType) primary_info.z;
SetPixelRed(q,ScaleMapToQuantum(pixel.red));
SetPixelGreen(q,ScaleMapToQuantum(pixel.green));
SetPixelBlue(q,ScaleMapToQuantum(pixel.blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,RGBTransformImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
register size_t
blue,
green,
red;
/*
Convert PseudoClass image.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
MagickPixelPacket
pixel;
red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
image->colormap[i].red=ScaleMapToQuantum(pixel.red);
image->colormap[i].green=ScaleMapToQuantum(pixel.green);
image->colormap[i].blue=ScaleMapToQuantum(pixel.blue);
}
(void) SyncImage(image);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  ImageType
    restore_type;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(MagickTrue);
  /*
    Record the new colorspace and reset colorimetry to defaults; the
    branches below then adjust gamma/chromaticity per colorspace family.
  */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  restore_type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Gray colorspaces force a grayscale type; linear gray is gamma 1.0.
      */
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.0;
      restore_type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.0;
    else
      {
        /*
          Remaining colorspaces: perceptual intent with sRGB-like
          primaries and D65 white point.
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  status=SyncImagePixelCache(image,&image->exception);
  image->type=restore_type;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageGray() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities and changes the type of the image to
% bi-level or grayscale.
%
% The format of the SetImageGray method is:
%
% MagickBooleanType SetImageGray(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *gray_view;

  const char
    *option;

  const PixelPacket
    *p;

  ImageType
    detected;

  ssize_t
    x,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Nothing to do when the image is already typed as gray or bi-level.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  /*
    Only gray and sRGB-compatible colorspaces are candidates.
  */
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  /*
    Honor the colorspace:auto-grayscale property: when disabled, do not
    auto-detect grayscale content.
  */
  option=GetImageProperty(image,"colorspace:auto-grayscale");
  if (IsStringNotFalse(option) == MagickFalse)
    return(MagickFalse);
  /*
    Scan the pixels: start optimistic (bi-level), demote to grayscale on
    the first non-monochrome gray pixel, abandon on any colored pixel.
  */
  detected=BilevelType;
  gray_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(gray_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsGrayPixel(p) == MagickFalse)
        {
          detected=UndefinedType;
          break;
        }
      if ((detected == BilevelType) && (IsMonochromePixel(p) == MagickFalse))
        detected=GrayscaleType;
      p++;
    }
    if (detected == UndefinedType)
      break;
  }
  gray_view=DestroyCacheView(gray_view);
  if (detected == UndefinedType)
    return(MagickFalse);
  /*
    Every pixel is gray: record the colorspace and the detected type.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=detected;
  if ((detected == GrayscaleType) && (image->matte != MagickFalse))
    image->type=GrayscaleMatteType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMonochrome() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange and changes the type of the image to bi-level.
%
% The format of the SetImageMonochrome method is:
%
% MagickBooleanType SetImageMonochrome(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *mono_view;

  const char
    *option;

  const PixelPacket
    *p;

  ImageType
    detected;

  ssize_t
    x,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Already bi-level: nothing to detect.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  /*
    Only gray and sRGB-compatible colorspaces are candidates.
  */
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  /*
    Honor the colorspace:auto-grayscale property: when disabled, do not
    auto-detect monochrome content.
  */
  option=GetImageProperty(image,"colorspace:auto-grayscale");
  if (IsStringNotFalse(option) == MagickFalse)
    return(MagickFalse);
  /*
    Scan all pixels; the first non-monochrome pixel disqualifies the image.
  */
  detected=BilevelType;
  mono_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(mono_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsMonochromePixel(p) == MagickFalse)
        {
          detected=UndefinedType;
          break;
        }
      p++;
    }
    if (detected == UndefinedType)
      break;
  }
  mono_view=DestroyCacheView(mono_view);
  if (detected == UndefinedType)
    return(MagickFalse);
  /*
    Every pixel is 0 or QuantumRange: mark the image bi-level gray.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=detected;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (colorspace == image->colorspace)
    return(MagickTrue);
  /*
    Embedded ICC/ICM color profiles are invalidated by the transform.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace));
  /*
    Target is sRGB: a single inverse transform suffices.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformRGBImage(image,image->colorspace));
  /*
    Otherwise bring the pixels back to sRGB first, then convert to the
    requested colorspace.
  */
  status=MagickTrue;
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformRGBImage(image,image->colorspace);
  if (status == MagickFalse)
    return(MagickFalse);
  return(RGBTransformImage(image,colorspace));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformRGBImage() converts the reference image from an alternate
% colorspace to sRGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the TransformRGBImage method is:
%
% MagickBooleanType TransformRGBImage(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    b,
    g,
    r;

  /*
    CMY is subtractive: each RGB channel is the complement of the
    corresponding ink coverage, scaled to the quantum range.
  */
  r=1.0-cyan;
  g=1.0-magenta;
  b=1.0-yellow;
  *red=ClampToQuantum(QuantumRange*r);
  *green=ClampToQuantum(QuantumRange*g);
  *blue=ClampToQuantum(QuantumRange*b);
}
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  /*
    Apply the fixed 3x3 linear map from LMS cone responses back to CIE
    XYZ tristimulus values (inverse of the XYZ-to-LMS matrix).
  */
  double
    x,
    y,
    z;

  x=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
  *X=x;
  *Y=y;
  *Z=z;
}
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    x,
    y,
    z;

  /*
    LMS to RGB by way of the XYZ intermediate space.
  */
  ConvertLMSToXYZ(L,M,S,&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    x,
    y,
    z;

  /*
    Rescale the normalized Luv channels to their native ranges
    (L: [0,100], u: [-134,220], v: [-140,122]) before the XYZ hop.
  */
  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline ssize_t RoundToYCC(const MagickRealType value)
{
  /*
    Round half-up to the nearest YCC table index, clamped to [0,1388].
  */
  if (value >= 1388.0)
    return(1388);
  if (value <= 0.0)
    return(0);
  return((ssize_t) (value+0.5));
}
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    x,
    y,
    z;

  /*
    Rescale normalized Lab (L in [0,1]; a,b centered at 0.5) to native
    Lab ranges, then convert through XYZ.
  */
  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    ry,
    X,
    Y,
    Z;

  /*
    Recover XYZ from chromaticity (x,y) and luminance Y; the
    PerceptibleReciprocal() avoids dividing by a vanishing y.
  */
  ry=PerceptibleReciprocal(low_y);
  X=ry*cap_Y*low_x;
  Y=cap_Y;
  Z=ry*cap_Y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    b,
    g,
    pb,
    pr,
    r;

  /*
    Recenter the chroma channels around zero (0.5 is the zero point),
    then apply the inverse BT.601-style matrix and scale to quantum.
  */
  pb=Pb-0.5;
  pr=Pr-0.5;
  r=0.99999999999914679361*Y-1.2188941887145875e-06*pb+
    1.4019995886561440468*pr;
  g=0.99999975910502514331*Y-0.34413567816504303521*pb-
    0.71413649331646789076*pr;
  b=1.00000124040004623180*Y+1.77200006607230409200*pb+
    2.1453384174593273e-06*pr;
  *red=ClampToQuantum(QuantumRange*r);
  *green=ClampToQuantum(QuantumRange*g);
  *blue=ClampToQuantum(QuantumRange*b);
}
/*
  Convert YCbCr to RGB.  The chroma channels are expected pre-normalized
  with 0.5 as the zero point, so the YPbPr inverse matrix applies
  unchanged (see ConvertYPbPrToRGB).
*/
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,Quantum *red,Quantum *green,Quantum *blue)
{
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    b,
    db,
    dr,
    g,
    r;

  /*
    Recenter the chroma channels around zero (0.5 is the zero point),
    then apply the inverse YDbDr matrix and scale to quantum.
  */
  db=Db-0.5;
  dr=Dr-0.5;
  r=Y+9.2303716147657e-05*db-0.52591263066186533*dr;
  g=Y-0.12913289889050927*db+0.26789932820759876*dr;
  b=Y+0.66467905997895482*db-7.9202543533108e-05*dr;
  *red=ClampToQuantum(QuantumRange*r);
  *green=ClampToQuantum(QuantumRange*g);
  *blue=ClampToQuantum(QuantumRange*b);
}
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    b,
    g,
    iq_i,
    iq_q,
    r;

  /*
    Recenter the chroma channels around zero (0.5 is the zero point),
    then apply the inverse YIQ matrix and scale to quantum.
  */
  iq_i=I-0.5;
  iq_q=Q-0.5;
  r=Y+0.9562957197589482261*iq_i+0.6210244164652610754*iq_q;
  g=Y-0.2721220993185104464*iq_i-0.6473805968256950427*iq_q;
  b=Y-1.1069890167364901945*iq_i+1.7046149983646481374*iq_q;
  *red=ClampToQuantum(QuantumRange*r);
  *green=ClampToQuantum(QuantumRange*g);
  *blue=ClampToQuantum(QuantumRange*b);
}
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    b,
    g,
    r,
    u,
    v;

  /*
    Recenter the chroma channels around zero (0.5 is the zero point),
    then apply the inverse YUV matrix and scale to quantum.
  */
  u=U-0.5;
  v=V-0.5;
  r=Y-3.945707070708279e-05*u+1.1398279671717170825*v;
  g=Y-0.3946101641414141437*u-0.5805003156565656797*v;
  b=Y+2.0319996843434342537*u-4.813762626262513e-04*v;
  *red=ClampToQuantum(QuantumRange*r);
  *green=ClampToQuantum(QuantumRange*g);
  *blue=ClampToQuantum(QuantumRange*b);
}
MagickExport MagickBooleanType TransformRGBImage(Image *image,
const ColorspaceType colorspace)
{
#define TransformRGBImageTag "Transform/Image"
static const float
YCCMap[1389] =
{
0.000000, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000
};
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
progress=0;
exception=(&image->exception);
switch (colorspace)
{
case CMYKColorspace:
{
MagickPixelPacket
zero;
/*
Transform image from CMYK to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
ConvertCMYKToRGB(&pixel);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
case Rec601LumaColorspace:
case Rec709LumaColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=(MagickRealType) GetPixelGray(q);
if ((image->intensity == Rec601LuminancePixelIntensityMethod) ||
(image->intensity == Rec709LuminancePixelIntensityMethod))
gray=EncodePixelGamma(gray);
SetPixelRed(q,ClampToQuantum(gray));
SetPixelGreen(q,ClampToQuantum(gray));
SetPixelBlue(q,ClampToQuantum(gray));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case CMYColorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
/*
Transform image from source colorspace to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
X,
Y,
Z;
Quantum
blue,
green,
red;
X=QuantumScale*GetPixelRed(q);
Y=QuantumScale*GetPixelGreen(q);
Z=QuantumScale*GetPixelBlue(q);
switch (colorspace)
{
case CMYColorspace:
{
ConvertCMYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLColorspace:
{
ConvertHCLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSBColorspace:
{
ConvertHSBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSIColorspace:
{
ConvertHSIToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSLColorspace:
{
ConvertHSLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSVColorspace:
{
ConvertHSVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HWBColorspace:
{
ConvertHWBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LabColorspace:
{
ConvertLabToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LMSColorspace:
{
ConvertLMSToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LuvColorspace:
{
ConvertLuvToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case xyYColorspace:
{
ConvertxyYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case XYZColorspace:
{
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YCbCrColorspace:
{
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YDbDrColorspace:
{
ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YIQColorspace:
{
ConvertYIQToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YPbPrColorspace:
{
ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YUVColorspace:
{
ConvertYUVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
default:
{
red=ClampToQuantum(QuantumRange*X);
green=ClampToQuantum(QuantumRange*Y);
blue=ClampToQuantum(QuantumRange*Z);
break;
}
}
SetPixelRed(q,ClampToQuantum((MagickRealType) red));
SetPixelGreen(q,ClampToQuantum((MagickRealType) green));
SetPixelBlue(q,ClampToQuantum((MagickRealType) blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to sRGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma");
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma");
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black");
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white");
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
film_gamma);
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++)
logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/
film_gamma)-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=QuantumRange;
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(EncodePixelGamma((MagickRealType)
logmap[ScaleQuantumToMap(GetPixelRed(q))]));
green=ClampToQuantum(EncodePixelGamma((MagickRealType)
logmap[ScaleQuantumToMap(GetPixelGreen(q))]));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType)
logmap[ScaleQuantumToMap(GetPixelBlue(q))]));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelRed(q)));
green=ClampToQuantum(EncodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType)
GetPixelBlue(q)));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I and Q, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(1.0*(double) i);
y_map[i].x=(0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].x=(-0.5*0.66668*(2.0*(double) i-MaxMap));
x_map[i].y=(1.0*(double) i);
y_map[i].y=(0.5*0.00000*(2.0*(double) i-MaxMap));
z_map[i].y=(0.5*1.33333*(2.0*(double) i-MaxMap));
x_map[i].z=(1.0*(double) i);
y_map[i].z=(-0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].z=(-0.5*0.66668*(2.0*(double) i-MaxMap));
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*(double) i;
y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap);
z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap);
x_map[i].y=0.99999975910502514331*(double) i;
y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap);
z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap);
x_map[i].z=1.00000124040004623180*(double) i;
y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap);
z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap));
z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*(double) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*(double) i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*(double) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*(double) i-MaxMap));
z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap));
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.3584000*(double) i);
y_map[i].x=(MagickRealType) (0.0000000);
z_map[i].x=(MagickRealType) (1.8215000*((double) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].y=(MagickRealType) (1.3584000*(double) i);
y_map[i].y=(MagickRealType) ((-0.4302726)*((double) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].y=(MagickRealType) ((-0.9271435)*((double) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].z=(MagickRealType) (1.3584000*(double) i);
y_map[i].z=(MagickRealType) (2.2179000*((double) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].z=(MagickRealType) (0.0000000);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert to sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(q));
green=ScaleQuantumToMap(GetPixelGreen(q));
blue=ScaleQuantumToMap(GetPixelBlue(q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
SetPixelRed(q,ClampToQuantum(pixel.red));
SetPixelGreen(q,ClampToQuantum(pixel.green));
SetPixelBlue(q,ClampToQuantum(pixel.blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransformRGBImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
MagickPixelPacket
pixel;
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(image->colormap[i].red);
green=ScaleQuantumToMap(image->colormap[i].green);
blue=ScaleQuantumToMap(image->colormap[i].blue);
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
image->colormap[i].red=ClampToQuantum(pixel.red);
image->colormap[i].green=ClampToQuantum(pixel.green);
image->colormap[i].blue=ClampToQuantum(pixel.blue);
}
(void) SyncImage(image);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
|
soft_selection_move_generator.h | /*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_SOFT_SELECTION_MOVE_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_SOFT_SELECTION_MOVE_MOVE_GENERATOR_H__
#include "abstract_move_generator.h"
namespace printemps {
namespace neighborhood {
/*****************************************************************************/
template <class T_Variable, class T_Expression>
class SoftSelectionMoveGenerator
    : public AbstractMoveGenerator<T_Variable, T_Expression> {
   private:
   public:
    /*************************************************************************/
    SoftSelectionMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    virtual ~SoftSelectionMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    /**
     * Builds the list of "soft selection" candidate moves. For every
     * effective constraint, each non-auxiliary variable yields two moves:
     * one setting both the variable and the constraint's auxiliary variable
     * to 0, and one setting both to 1. Also installs the flag updater that
     * enables/disables moves during the search.
     *
     * @param a_RAW_CONSTRAINT_PTRS Candidate constraints; ineffective ones
     *        (e.g., containing fixed or selection variables) are filtered
     *        out by extract_effective_constraint_ptrs().
     */
    constexpr void setup(
        const std::vector<model_component::Constraint<T_Variable, T_Expression>
                              *> &a_RAW_CONSTRAINT_PTRS) {
        /**
         * Exclude constraints which contain fixed variables or selection
         * variables.
         */
        auto constraint_ptrs =
            extract_effective_constraint_ptrs(a_RAW_CONSTRAINT_PTRS);

        for (auto &&constraint_ptr : constraint_ptrs) {
            const auto &sensitivities =
                constraint_ptr->expression().sensitivities();
            const auto aux_variable_ptr = constraint_ptr->aux_variable_ptr();

            /**
             * Per-constraint scratch buffer of generated moves. (A previous
             * revision also declared an unused vector of the same name in
             * the enclosing scope, which this one shadowed; the dead outer
             * declaration has been removed.)
             */
            std::vector<Move<T_Variable, T_Expression>> moves;
            moves.reserve(2 * sensitivities.size());

            for (auto &&sensitivity : sensitivities) {
                const auto variable_ptr = sensitivity.first;
                /// The auxiliary variable itself does not generate moves.
                if (variable_ptr == aux_variable_ptr) {
                    continue;
                }
                Move<T_Variable, T_Expression> move_first;
                Move<T_Variable, T_Expression> move_second;

                move_first.related_constraint_ptrs = utility::union_set(
                    variable_ptr->related_constraint_ptrs(),
                    aux_variable_ptr->related_constraint_ptrs());
                move_first.sense               = MoveSense::SoftSelection;
                move_first.is_univariable_move = false;
                move_first.is_selection_move   = false;
                move_first.is_special_neighborhood_move = true;
                move_first.is_available                 = true;
                move_first.overlap_rate                 = 0.0;

                /// Both moves share the metadata above; only the target
                /// values of the alterations differ (0 vs. 1).
                move_second = move_first;

                move_first.alterations.emplace_back(variable_ptr, 0);
                move_first.alterations.emplace_back(aux_variable_ptr, 0);

                move_second.alterations.emplace_back(variable_ptr, 1);
                move_second.alterations.emplace_back(aux_variable_ptr, 1);

                moves.push_back(move_first);
                moves.push_back(move_second);
            }
            this->m_moves.insert(this->m_moves.end(), moves.begin(),
                                 moves.end());
        }
        this->m_flags.resize(this->m_moves.size());

        /**
         * Setup move updater: recomputes the availability flag of each
         * stored move. A move is disabled (flag 0) if it is marked
         * unavailable, touches a fixed variable, or would re-assign a
         * variable its current value; otherwise acceptance depends on the
         * improvability filters unless a_ACCEPT_ALL is set.
         */
        auto move_updater =  //
            [this](auto *                      a_moves_ptr,                 //
                   auto *                      a_flags,                     //
                   const bool                  a_ACCEPT_ALL,                //
                   const bool                  a_ACCEPT_OBJECTIVE_IMPROVABLE,    //
                   const bool                  a_ACCEPT_FEASIBILITY_IMPROVABLE,  //
                   [[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
                const int MOVES_SIZE = a_moves_ptr->size();
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
                for (auto i = 0; i < MOVES_SIZE; i++) {
                    (*a_flags)[i] = 1;
                    if (!(*a_moves_ptr)[i].is_available) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (neighborhood::has_fixed_variable((*a_moves_ptr)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    /// A move that leaves every altered variable at its
                    /// current value is a no-op; disable it.
                    for (const auto &alteration :
                         (*a_moves_ptr)[i].alterations) {
                        if (alteration.first->value() == alteration.second) {
                            (*a_flags)[i] = 0;
                            break;
                        }
                    }
                    if ((*a_flags)[i] == 0) {
                        continue;
                    }
                    if (a_ACCEPT_ALL) {
                        /** nothing to do */
                    } else {
                        if (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
                            neighborhood::has_objective_improvable_variable(
                                (*a_moves_ptr)[i])) {
                            continue;
                        }
                        if (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
                            neighborhood::has_feasibility_improvable_variable(
                                (*a_moves_ptr)[i])) {
                            continue;
                        }
                        (*a_flags)[i] = 0;
                    }
                }
            };
        this->m_move_updater = move_updater;
    }
};
} // namespace neighborhood
} // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/ |
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;
  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;
  /// Which kind of pointer declarator we saw.
  /// NOTE(review): stored as a raw integer; the enumeration it encodes is not
  /// visible in this header -- confirm against the code that assigns it.
  uint8_t PointerKind;
  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Fast path: the requested file is the one we handed out last time.
    if (file == Cache.File)
      return Cache.Nullability;

    // Write the previously cached entry back into the map, unless the cache
    // was never populated (its FileID is still invalid).
    if (!Cache.File.isInvalid())
      Map[Cache.File] = Cache.Nullability;

    // Load the requested entry into the cache and hand out a reference to it.
    // Note Map[file] default-constructs an entry on first access.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Returns the expected type for the token starting at \p Tok, or a null
  /// QualType when no expectation was recorded for that location.  A stored
  /// Type takes precedence over a stored ComputeType callback.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // A visible previous declaration is always a redeclaration candidate.
  if (isVisible(Old))
    return true;

  // Otherwise, only link if the new declaration could also be declared in a
  // different translation unit.  (See the comment in the LookupResult
  // overload for why it's safe to compute the new declaration's linkage
  // here.)
  if (!New->isExternallyDeclarable())
    return false;

  assert(Old->isExternallyDeclarable() &&
         "should not have found a non-externally-declarable previous decl");
  return true;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
/// State for one section kind controlled by #pragma clang section.
struct PragmaClangSection {
  /// The section name given by the pragma.
  std::string SectionName;
  /// Whether a section override is currently in effect.
  bool Valid = false;
  /// The location of the pragma that configured this section.
  SourceLocation PragmaLocation;

  /// Apply a #pragma clang section action (set or clear) to this entry.
  void Act(SourceLocation PragmaLocation,
           PragmaClangSectionAction Action,
           StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
/// A stack of values for one of the MS-compatible pragmas (pack, vtordisp,
/// the segment pragmas, ...), driven by the push/pop/set/reset actions of
/// PragmaMsStackAction.
template<typename ValueType>
struct PragmaStack {
  /// One pushed entry: the saved value, its optional label, and the
  /// locations of the pragmas that produced it.
  struct Slot {
    llvm::StringRef StackSlotLabel;
    ValueType Value;
    SourceLocation PragmaLocation;
    SourceLocation PragmaPushLocation;
    Slot(llvm::StringRef StackSlotLabel, ValueType Value,
         SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
        : StackSlotLabel(StackSlotLabel), Value(Value),
          PragmaLocation(PragmaLocation),
          PragmaPushLocation(PragmaPushLocation) {}
  };

  /// Perform the given stack action; defined out of line.
  void Act(SourceLocation PragmaLocation,
           PragmaMsStackAction Action,
           llvm::StringRef StackSlotLabel,
           ValueType Value);

  // MSVC seems to add artificial slots to #pragma stacks on entering a C++
  // method body to restore the stacks on exit, so it works like this:
  //
  //   struct S {
  //     #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
  //     void Method {}
  //     #pragma <name>(pop, InternalPragmaSlot)
  //   };
  //
  // It works even with #pragma vtordisp, although MSVC doesn't support
  //   #pragma vtordisp(push [, id], n)
  // syntax.
  //
  // Push / pop a named sentinel slot.
  void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
    assert((Action == PSK_Push || Action == PSK_Pop) &&
           "Can only push / pop #pragma stack sentinels!");
    Act(CurrentPragmaLocation, Action, Label, CurrentValue);
  }

  // Constructors.
  explicit PragmaStack(const ValueType &Default)
      : DefaultValue(Default), CurrentValue(Default) {}

  /// Whether the current value differs from the default.
  bool hasValue() const { return CurrentValue != DefaultValue; }

  SmallVector<Slot, 2> Stack;
  ValueType DefaultValue; // Value used for PSK_Reset action.
  ValueType CurrentValue;
  SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
  /// The #pragma pack value in effect when the #include was entered.
  unsigned CurrentValue;
  /// Location of the pragma that set CurrentValue.
  SourceLocation CurrentPragmaLocation;
  // NOTE(review): semantics inferred from the names -- HasNonDefaultValue
  // presumably records whether a non-default pack value was active, and
  // ShouldWarnOnInclude whether to diagnose a pack value left set across the
  // #include; confirm at the sites that populate PackIncludeStack.
  bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
  /// Pushes a sentinel labeled \p SlotLabel on the #pragma stacks when
  /// \p ShouldAct is true; the destructor pops it again.
  PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
  ~PragmaStackSentinelRAII();

private:
  Sema &S;
  StringRef SlotLabel;
  bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
  /// The location of the pragma.
  SourceLocation Loc;
  /// The attribute being applied by the pragma.
  ParsedAttr *Attribute;
  /// The subject match rules that select which declarations the attribute
  /// applies to.
  SmallVector<attr::SubjectMatchRule, 4> MatchRules;
  /// NOTE(review): presumably whether the attribute has matched any
  /// declaration yet -- confirm at the use sites.
  bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
  /// The location of the push attribute.
  SourceLocation Loc;
  /// The namespace of this push group.
  const IdentifierInfo *Namespace;
  /// The attributes pushed by this group.
  SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
/// Register the callbacks (and the opaque parser handle passed back to them)
/// that Sema uses to parse late-parsed templates on demand.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup,
                           void *P) {
  // The three stores are independent of one another.
  OpaqueParser = P;
  LateTemplateParser = LTP;
  LateTemplateParserCleanup = LTPCleanup;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;

/// Saved state that lets a scope restore the previously active
/// delayed-diagnostics pool when it is popped (see DelayedDiagnostics).
class DelayedDiagnosticsState {
  /// The pool that was current before the push; restored on pop.
  sema::DelayedDiagnosticPool *SavedPool;
  friend class Sema::DelayedDiagnostics;
};

/// State saved around parsing a declaration.
typedef DelayedDiagnosticsState ParsingDeclState;
/// State saved around entering a new processing context.
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
  /// The current pool of diagnostics into which delayed
  /// diagnostics should go.  Null when diagnostics are not being delayed.
  sema::DelayedDiagnosticPool *CurPool;

public:
  DelayedDiagnostics() : CurPool(nullptr) {}

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Determines whether diagnostics should be delayed.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }

  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const {
    return CurPool;
  }

  /// Enter a new scope. Access and deprecation diagnostics will be
  /// collected in this pool.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = &pool;
    return state;
  }

  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics. This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = nullptr;
    return state;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    // Undelayed scopes always have a null pool; a non-null pool here means
    // an unbalanced push/pop.
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
  Sema &S;
  DeclContext *SavedContext;
  ProcessingContextState SavedContextState;
  QualType SavedCXXThisTypeOverride;

public:
  /// Saves the current context state, then makes \p ContextToPush current.
  /// When \p NewThisContext is true, any 'this' type override is cleared for
  /// the duration of the scope.
  ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride) {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
    if (NewThisContext)
      S.CXXThisTypeOverride = QualType();
  }

  /// Restore the saved state.  Safe to call more than once: the first call
  /// nulls out SavedContext, which turns later calls into no-ops.
  void pop() {
    if (SavedContext == nullptr)
      return;
    S.CurContext = SavedContext;
    S.DelayedDiagnostics.popUndelayed(SavedContextState);
    S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
    SavedContext = nullptr;
  }

  ~ContextRAII() { pop(); }
};
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;

/// Whether the expression currently being analyzed is constant-evaluated:
/// either the innermost evaluation context says so, or the lightweight
/// override flag above is set.
bool isConstantEvaluated() {
  if (isConstantEvaluatedOverride)
    return true;
  return ExprEvalContexts.back().isConstantEvaluated();
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
  Sema &S;
  /// Restores the previous DeclContext when this scope ends.
  Sema::ContextRAII SavedContext;
  /// Whether addContextNote() pushed a code-synthesis context that the
  /// destructor must pop.
  bool PushedCodeSynthesisContext = false;

public:
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    // Only functions and Objective-C methods get synthesized bodies.
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  /// Record that the body is being synthesized because of a use at
  /// \p UseLoc.  May be called at most once per scope (asserted below).
  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);
    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);
    PushedCodeSynthesisContext = true;
  }

  ~SynthesizedFunctionScope() {
    // Unwind in the reverse order of the constructor / addContextNote():
    // synthesis context, will-have-body flag, evaluation context, scope.
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a property of the expression
/// evaluation context (see ExpressionEvaluationContext::UnevaluatedAbstract).
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  /// The constructor deliberately does not take this; default it to false so
  /// a freshly pushed record never carries an indeterminate value.  Callers
  /// that care must set it after pushing the record.
  bool IsDecltype = false;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  /// Maybe-odr-used expressions saved from the enclosing context while this
  /// one is active.
  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  /// Candidate expressions recorded for the pending-noderef warning; see
  /// WarnOnPendingNoDerefs.
  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// Expressions appearing as the LHS of a volatile assignment in this
  /// context. We produce a warning for these when popping the context if
  /// they are not discarded-value expressions nor unevaluated operands.
  SmallVector<Expr*, 2> VolatileAssignmentLHSs;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}

  /// Whether this is any flavor of unevaluated operand.
  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  /// Whether this context is evaluated at compile time.
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  /// The outcome of overload resolution for a special member function.
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  /// The resolved method pointer with the Kind packed into the two
  /// low (alignment) bits of the pointer.
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  /// Classify \p MD: NoMemberOrDeleted if it is deleted, Success
  /// otherwise. \p MD must be non-null (it is dereferenced here).
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};
/// A SpecialMemberOverloadResult that can live in a FoldingSet
/// (see SpecialMemberCache), keyed by the FoldingSetNodeID it was
/// constructed with.
class SpecialMemberOverloadResultEntry
  : public llvm::FastFoldingSetNode,
    public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
    : FastFoldingSetNode(ID)
  {}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  // Snapshot Sema's floating-point options on entry; the destructor
  // restores them, so any changes made inside the scope are undone.
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

private:
  Sema& S;
  FPOptions OldFPFeaturesState; // saved copy, restored on destruction
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;     // Sema instance that will finish emitting the diagnostic.
  unsigned DiagID;   // The diagnostic being built, forwarded to Sema on emit.

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
    : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

  // Flush the diagnostic and hand it to Sema for emission (which may
  // attach a template instantiation stack).
  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    // Stream through the base class, but return the derived reference so
    // chained << keeps producing SemaDiagnosticBuilder.
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  // Report through the DiagnosticsEngine, then wrap the builder so Sema
  // controls final emission (see SemaDiagnosticBuilder above). The named
  // local is required: the wrapper's ctor takes DiagnosticBuilder by
  // non-const reference, which a temporary cannot bind to.
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
  Sema *Self; // Non-owning; the Sema is expected to outlive popped scopes.

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
  // Deleter entry point invoked by PoppedFunctionScopePtr; defined
  // out of line.
  void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
/// Retrieve the innermost function scope, or null when no function scope
/// has been pushed.
sema::FunctionScopeInfo *getCurFunction() const {
  if (FunctionScopes.empty())
    return nullptr;
  return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  // Use defaulted special members instead of empty user-provided bodies:
  // clearer intent, and keeps the type trivially default-constructible
  // where possible.
  TypeDiagnoser() = default;

  /// Emit the diagnostic for the incomplete type \p T required at \p Loc,
  /// reporting through \p S.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;

  virtual ~TypeDiagnoser() = default;
};
// Helpers that normalize the argument types BoundTypeDiagnoser accepts
// into forms DiagnosticBuilder's operator<< can stream. Most are identity
// functions; the Expr/TypeLoc overloads extract a SourceRange, and the
// SourceLocation overload relies on implicit conversion to SourceRange.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// A TypeDiagnoser that carries a diagnostic ID plus extra arguments to
/// stream before the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;
  // Arguments are stored by const reference; the caller must keep them
  // alive until diagnose() runs (callers construct this on the stack and
  // use it immediately).
  std::tuple<const Ts &...> Args;

  /// Stream every bound argument into \p DB, in tuple order.
  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order. The array
    // initializer guarantees left-to-right evaluation of the pack.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T; // The type itself is always the final argument.
  }
};
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
/// State for one entry on the ModuleScopes stack (a module currently
/// being parsed).
struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool ImplicitGlobalModuleFragment = false;
  // NOTE(review): presumably the set of modules visible in the enclosing
  // scope, restored when this scope ends — confirm against the push/pop
  // code for ModuleScopes.
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  if (ModuleScopes.empty())
    return nullptr;
  return ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path: non-hidden declarations are always visible; only fall
  // back to the slow check for hidden ones.
  if (!D->isHidden())
    return true;
  return isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
/// Determine whether any declaration of an entity is visible, trying the
/// cheap isVisible() check before the slow path.
bool
hasVisibleDeclaration(const NamedDecl *D,
                      llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  if (isVisible(D))
    return true;
  return hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
/// Check completeness of \p T without emitting any diagnostic: a null
/// diagnoser makes RequireCompleteTypeImpl silent on failure.
bool isCompleteType(SourceLocation Loc, QualType T) {
  return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
/// Variadic convenience overload: binds \p DiagID and \p Args into a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser-based overload.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
/// Variadic convenience overload: binds \p DiagID and \p Args into a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser-based overload.
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
/// Variadic convenience overload: binds \p DiagID and \p Args into a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser-based overload.
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Result of a "can we skip this body?" query during declaration
/// processing.
struct SkipBodyInfo {
  SkipBodyInfo() = default;
  bool ShouldSkip = false;
  bool CheckSameAsPrevious = false;
  NamedDecl *Previous = nullptr;
  NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
};
/// The result of ClassifyName(): a NameClassificationKind plus the
/// payload appropriate to that kind. Constructed via the static factory
/// functions below; accessors assert that the kind matches.
class NameClassification {
  NameClassificationKind Kind;

  // Discriminated by Kind; only the member corresponding to Kind holds a
  // valid value.
  union {
    ExprResult Expr;
    NamedDecl *NonTypeDecl;
    TemplateName Template;
    ParsedType Type;
  };

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  // Keyword classifications carry no payload; the IdentifierInfo is only
  // used to select this constructor.
  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification ContextIndependentExpr(ExprResult E) {
    NameClassification Result(NC_ContextIndependentExpr);
    Result.Expr = E;
    return Result;
  }

  static NameClassification NonType(NamedDecl *D) {
    NameClassification Result(NC_NonType);
    Result.NonTypeDecl = D;
    return Result;
  }

  static NameClassification UndeclaredNonType() {
    return NameClassification(NC_UndeclaredNonType);
  }

  static NameClassification DependentNonType() {
    return NameClassification(NC_DependentNonType);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ExprResult getExpression() const {
    assert(Kind == NC_ContextIndependentExpr);
    return Expr;
  }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  NamedDecl *getNonTypeDecl() const {
    assert(Kind == NC_NonType);
    return NonTypeDecl;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
    return Template;
  }

  /// Map the template classification kinds onto TemplateNameKind; only
  /// valid for the four template kinds accepted by getTemplateName().
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
/// (Returned by getTemplateNameKindForDiagnostics below.)
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name. On a true return, \p Dependent reports whether the
/// plausible reference is a dependent one.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  // Only meaningful for valid C++ expressions; Dependent is left untouched
  // on this path, as in all early exits below it has already been set.
  if (E.isInvalid() || !getLangOpts().CPlusPlus)
    return false;

  Expr *Ex = E.get();
  Dependent = false;

  // Non-dependent references are plausible template-names as long as they
  // don't already carry explicit template arguments.
  if (const auto *Ref = dyn_cast<DeclRefExpr>(Ex))
    return !Ref->hasExplicitTemplateArgs();
  if (const auto *Member = dyn_cast<MemberExpr>(Ex))
    return !Member->hasExplicitTemplateArgs();

  // From here on, any positive answer refers to a dependent name.
  Dependent = true;
  if (const auto *Ref = dyn_cast<DependentScopeDeclRefExpr>(Ex))
    return !Ref->hasExplicitTemplateArgs();
  if (const auto *Member = dyn_cast<CXXDependentScopeMemberExpr>(Ex))
    return !Member->hasExplicitTemplateArgs();

  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
/// How CheckConstexprFunctionDefinition should respond to problems it finds.
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
// NOTE(review): ordering presumably matches the %select in that diagnostic
// -- confirm before reordering enumerators.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion. They are bit flags and
// may be OR'd together (checkNonTrivialCUnion receives them as an unsigned
// NonTrivialKind).
enum NonTrivialCUnionKind {
// Check default-initialization of the union.
NTCUK_Init = 0x1,
// Check destruction of the union.
NTCUK_Destruct = 0x2,
// Check copying of the union.
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Returns true if \p D is a (non-null) Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
/// The kind of module-declaration the parser saw: a module interface or a
/// module implementation unit. (Passed to ActOnModuleDecl.)
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics. (Passed to diagnoseMissingImport.)
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
/// Convenience overload that forwards to the static getPrintingPolicy
/// overload below using the current Context and PP members.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
/// (Produced by getNonTagTypeDeclKind below.)
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
/// How a tag name is being used in the source. Passed to ActOnTag and
/// ActOnDependentTag below.
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
/// Whether the "trivial_abi" attribute should be taken into account when
/// determining triviality of a special member (see SpecialMemberIsTrivial).
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
/// Used by mergeDeclAttributes and mergeAvailabilityAttr below.
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
/// \name Attribute and declaration merging
/// The merge*Attr helpers below reconcile an attribute seen on a
/// redeclaration with any attribute already present on the declaration
/// chain, returning the attribute to attach (or null on conflict).
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
/// Merge all attributes from \p Old onto the new declaration \p New.
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
// The Merge*Decl routines below check a redeclaration against the previous
// declaration(s) found by lookup and diagnose incompatibilities.
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
/// AssignmentAction - Used by all the assignment diagnostic functions to
/// describe what kind of operation is actually causing the implicit
/// conversion being diagnosed.
enum AssignmentAction {
AA_Assigning,          ///< Simple assignment: lhs = rhs.
AA_Passing,            ///< Passing an argument to a function.
AA_Returning,          ///< Returning a value from a function.
AA_Converting,         ///< A conversion in some other context.
AA_Initializing,       ///< Initializing a variable or member.
AA_Sending,            ///< Sending an argument in an ObjC message.
AA_Casting,            ///< An explicit cast.
AA_Passing_CFAudited   ///< Passing within a CF-audited region (ARC).
};
/// C++ Overloading.
/// Classifies the relationship between a new function declaration and an
/// existing set of declarations with the same name (see CheckOverload).
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
/// Determine whether \p New is an overload of the declarations in
/// \p OldDecls; on a non-overload, \p OldDecl receives the conflicting
/// declaration.
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
/// Attempt an implicit conversion from \p From to \p ToType, producing the
/// implicit conversion sequence that would be used (which may be a failed
/// sequence).
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
// Predicates classifying individual standard-conversion steps
// ([conv.prom], [conv.ptr], etc.). The Is*Conversion forms that take an
// out-parameter also compute the adjusted destination type.
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
/// Check (and compute the cast path for) a pointer conversion that may
/// involve derived-to-base adjustment.
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
/// Initialize \p Entity from \p Value, preferring a move when \p
/// NRVOCandidate permits it (used for return-value initialization).
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
/// Convert the implicit object argument \p From for a call to \p Method.
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
/// Convert \p From to type \p T as a converted constant expression
/// ([expr.const]); the evaluated result is returned through \p Value.
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
///
/// Subclasses supply the \c match predicate that decides which destination
/// types are acceptable, plus the diagnostics to emit for each way the
/// conversion can fail (see PerformContextualImplicitConversion).
class ContextualImplicitConverter {
public:
// When true, suppress all diagnostics from this converter.
bool Suppress;
// When true, diagnose but do not actually perform the conversion.
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
/// Contextual converter specialized for integral-constant-expression
/// contexts: accepts integral and (optionally scoped) enumeration types and
/// routes the "no match" diagnostic to diagnoseNotInt.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// Kinds of Objective-C subscripting (base classification for
/// obj[idx] expressions).
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
/// Convert \p From as the base of a member access to \p Member.
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this is a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
/// \name Overload candidate construction
/// The Add*Candidate(s) routines below populate an OverloadCandidateSet
/// with viable (or non-viable, with a recorded failure reason) candidates
/// for a call, operator, or conversion.
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
/// Check conversions for the non-dependent parameters of a function
/// template candidate before template argument deduction.
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
/// Add a "surrogate" candidate: a conversion to function pointer/reference
/// followed by a call through the result.
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
/// Resolve an overloaded address-of expression (e.g. &f) against
/// \p TargetType, returning the selected function or null.
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
/// Rewrite an expression that referred to an overloaded function so that it
/// refers directly to the resolved function \p Fn.
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
/// Build the begin()/end() call of a range-based for loop.
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
/// Build a call where the callee is an overloaded function, performing
/// overload resolution (with optional typo correction of the callee).
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
// Build uses of overloaded operators, resolving against member, non-member,
// and built-in candidates.
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
/// Build a call on an object of class type, considering operator() overloads
/// and surrogate conversion functions.
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
/// Select the redeclaration-lookup mode appropriate for CurContext.
///
/// A declaration with an owning module for linkage can never link against
/// anything that is not visible, so visible-only lookup suffices there.
/// We don't need to check linkage here; if the context has internal
/// linkage, redeclaration lookup won't find things from other TUs, and we
/// can't safely compute linkage yet in general.
RedeclarationKind forRedeclarationInCurContext() {
Decl *ContextDecl = cast<Decl>(CurContext);
bool HasOwningModule =
ContextDecl->getOwningModuleForLinkage(/*IgnoreLinkage*/true) != nullptr;
return HasOwningModule ? ForVisibleRedeclaration : ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
/// Look up the special member function (constructor, destructor, assignment
/// operator, etc.) of class \p D matching the given qualifiers.
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
// Callback invoked to produce diagnostics for a typo correction.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
// Callback invoked to rebuild an expression once a correction is chosen.
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
/// Per-TypoExpr state: the candidate consumer plus the diagnostic and
/// recovery callbacks registered when the TypoExpr was created.
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
/// Perform unqualified name lookup starting at scope \p S.
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
/// Perform qualified name lookup into the given declaration context.
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
// Lookup helpers for the C++ special member functions of a class.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
/// Look up a user-defined literal operator matching \p ArgTys; the Allow*
/// flags control which flavors (raw, template, string template) are viable.
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
bool isKnownName(StringRef name);
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload taking a filter but no InitDecl.
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
/// Convenience overload taking an ExprResult. Invalid results pass through
/// untouched; otherwise forwards to the Expr* overload.
///
/// Fix: the previous implementation dropped \p InitDecl when forwarding
/// (calling the (Expr*, Filter) overload, which defaults InitDecl to
/// nullptr), so typo correction inside a variable's initializer could
/// suggest the variable being initialized. Propagate InitDecl explicitly.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
/// Convenience overload taking an ExprResult and a filter but no InitDecl.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
/// Emit the diagnostic (and fix-it) for an accepted typo correction.
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
/// Compute the namespaces and classes associated with the call arguments
/// \p Args, for argument-dependent lookup ([basic.lookup.argdep]p2).
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
/// Create a declaration for predefined builtin \p ID on first use.
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
/// Fetch the string argument at \p ArgNum of \p Attr, diagnosing if it is
/// not a string literal.
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implelementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// it property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// How strictly two Objective-C method declarations are compared by
/// MatchTwoMethodDeclarations.
enum MethodMatchStrategy {
  MMS_loose,   ///< Loose type matching.
  MMS_strict   ///< Strict type matching (the default strategy).
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declaraed in interface or
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See descriptoin of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
///
/// \param RecordFailure When true, the (identifier, location) pair is added
/// to TypoCorrectionFailures; presumably consulted to avoid re-attempting
/// the same correction — confirm at use sites.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  // An empty (default-constructed) TypoCorrection signals failure.
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as AddInstanceMethodToGlobalPool,
/// but for factory (class) methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the instance method for
/// \p Sel and warns if there are multiple signatures. Thin wrapper over
/// LookupMethodInGlobalPool with instance == true.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the factory (class) method for
/// \p Sel and warns if there are multiple signatures. Thin wrapper over
/// LookupMethodInGlobalPool with instance == false.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// Wrapper for an expression that has been processed as a "full expression"
/// (see Sema::MakeFullExpr). Callers can only retrieve the wrapped
/// expression; the wrapping constructor is private to Sema.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }
  // Hand the expression back to the caller (may be null).
  ExprResult release() {
    return E;
  }
  Expr *get() const { return E; }
  Expr *operator->() {
    return E;
  }
private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;
  explicit FullExprArg(Expr *expr) : E(expr) {}
  Expr *E;   // The wrapped full expression; may be null.
};
/// Wrap \p Arg as a full expression, using the expression's own location
/// (or an invalid location when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
/// Wrap \p Arg as a full expression whose value is used, finishing the
/// full-expression at location \p CC.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
/// Wrap \p Arg as a full expression whose value is deliberately discarded.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  ExprResult Finished =
      ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue*/ true);
  return FullExprArg(Finished.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
/// Calls ActOnStartOfCompoundStmt on construction and
/// ActOnFinishOfCompoundStmt on destruction.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }
  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }
private:
  Sema &S;
};
/// An RAII helper that pops a function scope on exit, unless disabled
/// via disable().
struct FunctionScopeRAII {
  Sema &S;
  bool Active;   // When false, the destructor does nothing.
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  // Cancel the scheduled pop (e.g. when ownership is transferred).
  void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// The kind of processing being performed when building a C++
/// for-range statement (see ActOnCXXForRangeStmt / BuildCXXForRangeStmt).
enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,
  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,
  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
  BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
/// Bit-flags controlling copy-elision candidate checks (used by
/// getCopyElisionCandidate / isCopyElisionCandidate).
enum CopyElisionSemanticsKind {
  // Individual flags.
  CES_Strict = 0,
  CES_AllowParameters = 1,
  CES_AllowDifferentTypes = 2,
  CES_AllowExceptionVariables = 4,
  // Preset combinations of the flags above.
  CES_FormerDefault = (CES_AllowParameters),
  CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
  CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
                       CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
/// Enter a declaration-parsing context whose delayed diagnostics are
/// collected into \p pool; returns the state needed to pop it again
/// (see PopParsingDeclaration).
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
// State token returned by PushParsingClass and consumed by PopParsingClass.
typedef ProcessingContextState ParsingClassState;
/// Enter a class-parsing context; diagnostics are pushed "undelayed"
/// (see DelayedDiagnostics.pushUndelayed).
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
/// Leave a class-parsing context entered with PushParsingClass().
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
/// The kind of variable capture requested from tryCaptureVariable:
/// implicit, explicit by-value, or explicit by-reference.
enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely check whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
// Scope in which the member access expression was parsed.
Scope *S;
// The member name as written; kept by reference so ActOnMemberAccess can be
// reinvoked with it after a '.' -> '->' fixup (see comment above).
UnqualifiedId &Id;
// Objective-C implementation declaration context, if any; forwarded back to
// ActOnMemberAccessExpr on reinvocation.
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
// Argument ordering for BuildAtomicExpr below: 'API' (the default) means Args
// is in the order the atomic builtin's caller supplies them; 'AST' presumably
// matches the operand order stored on AtomicExpr — confirm at call sites.
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
// One designator in a __builtin_offsetof expression: either a member access
// (.ident) or an array subscript ([expr]).
struct OffsetOfComponent {
// Source range covering this single component.
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
// Discriminated by isBrackets: IdentInfo is active for .ident,
// E is active for [expr].
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
/// Returned by the CheckMicrosoftIfExistsSymbol overloads below when
/// evaluating the Microsoft __if_exists / __if_not_exists extension.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
///
/// Feed it each member/base function the implicit member would call (via
/// CalledDecl/CalledExpr) and it accumulates the most permissive exception
/// specification those calls require; read the result with getExceptionSpec().
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
// Canonical types already folded into Exceptions; used to deduplicate.
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
// The collected dynamic exception types, in first-seen order.
SmallVector<QualType, 4> Exceptions;
// Reset the collected dynamic exception set (and its dedup cache).
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
// Pre-C++11 there is no noexcept; start from the dynamic-spec form.
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
// C++11 [except.spec]p14:
//   The exception-specification is noexcept(false) if the set of
//   potential exceptions of the special member function contains "any"
// Materialize that as noexcept(false) with a synthesized 'false' literal.
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++17 fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  // Saved value of S.CXXThisTypeOverride; NOTE(review): presumably restored
  // by the destructor (RAII pattern) — confirm in the out-of-line definition.
  QualType OldCXXThisTypeOverride;
  // When false, this scope is a no-op and 'this' availability is unchanged.
  bool Enabled;
public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);
  ~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
///
/// Used by FindAllocationFunctions to restrict where 'operator new' and
/// 'operator delete' overloads are looked up.
enum AllocationFunctionScope {
  /// Only look for allocation functions in the global scope.
  AFS_Global,
  /// Only look for allocation functions in the scope of the
  /// allocated class.
  AFS_Class,
  /// Look for allocation functions in both the global scope
  /// and in the scope of the allocated class.
  AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Convenience overload: finish a full-expression using the expression's
/// own source location (or an invalid location when \p Expr is null).
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  SourceLocation Loc = Expr ? Expr->getExprLoc() : SourceLocation();
  return ActOnFinishFullExpr(Expr, Loc, DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
  /// The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;
  /// The identifier preceding the '::'.
  IdentifierInfo *Identifier;
  /// The location of the identifier.
  SourceLocation IdentifierLoc;
  /// The location of the '::'.
  SourceLocation CCLoc;
  /// Creates info object for the most typical case.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
    : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
      CCLoc(ColonColonLoc) {
  }
  /// Creates info object from an already-resolved QualType, wrapping it
  /// into a ParsedType for storage.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
    : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
      IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
  }
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *
startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange,
TypeSourceInfo *MethodType, SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Optional<std::pair<unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Every init-capture form other than copy-init ("[x = e]") is
  // treated as direct initialization.
  bool IsDirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  QualType CaptureType = buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id, IsDirectInit, Init);
  return ParsedType::make(CaptureType);
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, and false is returned.
bool CheckConstraintExpression(Expr *CE);
bool CalculateConstraintSatisfaction(ConceptDecl *NamedConcept,
MultiLevelTemplateArgumentList &MLTAL,
Expr *ConstraintExpr,
bool &IsSatisfied);
/// Check that the associated constraints of a template declaration match the
/// associated constraints of an older declaration of which it is a
/// redeclaration.
bool CheckRedeclarationConstraintMatch(TemplateParameterList *Old,
TemplateParameterList *New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check the validity of a parsed base specifier.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// The outcome of a C++ access-control check.
enum AccessResult {
  /// The entity is accessible from the current context.
  AR_accessible,
  /// The entity is not accessible from the current context.
  AR_inaccessible,
  /// The result depends on a template parameter; the check must be
  /// re-done at instantiation time.
  AR_dependent,
  /// The check has been deferred; presumably it is performed later as a
  /// delayed diagnostic (see HandleDelayedAccessCheck) — confirm.
  AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// The context in which a use of an abstract class type is being
/// diagnosed (return type, parameter type, field type, etc.).
/// NOTE(review): the ordering appears to line up with a %select in the
/// corresponding diagnostic text — confirm before reordering.
enum AbstractDiagSelID {
  /// No specific context; suppresses the selector.
  AbstractNone = -1,
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Require that \p T not be an abstract class type, diagnosing with
/// diagnostic \p DiagID (formatted with \p Args) when it is.
///
/// Convenience overload that wraps the diagnostic ID and its arguments
/// in a BoundTypeDiagnoser and forwards to the TypeDiagnoser overload.
///
/// \returns true if a diagnostic was emitted (i.e. \p T is abstract).
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> BTD(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, BTD);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Describes whether — and why — a name is assumed to be a template name
/// even though lookup did not find a template.
enum class AssumedTemplateKind {
  /// This is not assumed to be a template name.
  None,
  /// This is assumed to be a template name because lookup found nothing.
  FoundNothing,
  /// This is assumed to be a template name because lookup found one or more
  /// functions (but no function templates).
  FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
SourceLocation ConceptNameLoc, NamedDecl *FoundDecl,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked, which affects how strictly the
/// argument is validated against its parameter.
enum CheckTemplateArgumentKind {
  /// The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,
  /// The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,
  /// The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
/// The kind of comparison being performed between two template
/// parameter lists; selects the matching rules and diagnostics used by
/// TemplateParameterListsAreEqual.
enum TemplateParameterListEqualKind {
  /// We are matching the template parameter lists of two templates
  /// that might be redeclarations.
  ///
  /// \code
  /// template<typename T> struct X;
  /// template<typename T> struct X;
  /// \endcode
  TPL_TemplateMatch,
  /// We are matching the template parameter lists of two template
  /// template parameters as part of matching the template parameter lists
  /// of two templates that might be redeclarations.
  ///
  /// \code
  /// template<template<int I> class TT> struct X;
  /// template<template<int Value> class Other> struct X;
  /// \endcode
  TPL_TemplateTemplateParmMatch,
  /// We are matching the template parameter lists of a template
  /// template argument against the template parameter lists of a template
  /// template parameter.
  ///
  /// \code
  /// template<template<int Value> class Metafun> struct X;
  /// template<int Value> struct integer_c;
  /// X<integer_c> xic;
  /// \endcode
  TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
// Concepts
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
  /// An arbitrary expression.
  UPPC_Expression = 0,
  /// The base type of a class type.
  UPPC_BaseType,
  /// The type of an arbitrary declaration.
  UPPC_DeclarationType,
  /// The type of a data member.
  UPPC_DataMemberType,
  /// The size of a bit-field.
  UPPC_BitFieldWidth,
  /// The expression in a static assertion.
  UPPC_StaticAssertExpression,
  /// The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,
  /// The enumerator value.
  UPPC_EnumeratorValue,
  /// A using declaration.
  UPPC_UsingDeclaration,
  /// A friend declaration.
  UPPC_FriendDeclaration,
  /// A declaration qualifier.
  UPPC_DeclarationQualifier,
  /// An initializer.
  UPPC_Initializer,
  /// A default argument.
  UPPC_DefaultArgument,
  /// The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,
  /// The type of an exception.
  UPPC_ExceptionType,
  /// Partial specialization.
  UPPC_PartialSpecialization,
  /// Microsoft __if_exists.
  UPPC_IfExists,
  /// Microsoft __if_not_exists.
  UPPC_IfNotExists,
  /// A lambda expression.
  UPPC_Lambda,
  /// A block expression.
  UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
///
/// \param Pattern The type (with source location information) that forms
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
///
/// \param NumExpansions The number of expansions for this pack, if known
/// a priori.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
///
/// \param Pattern The type that forms the pattern of the pack expansion.
///
/// \param PatternRange The source range covering the pattern.
///
/// \param EllipsisLoc The location of the ellipsis.
///
/// \param NumExpansions The number of expansions for this pack, if known
/// a priori.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
///
/// \param NumExpansions The number of expansions for this pack, if known
/// a priori.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If the return value is false, \c ShouldExpand (and possibly
/// \c NumExpansions) must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
/// Perform template argument deduction for the given class template
/// partial specialization against the template argument list
/// \p TemplateArgs, reporting results and failure details via \p Info.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Perform template argument deduction for the given variable template
/// partial specialization against the template argument list
/// \p TemplateArgs, reporting results and failure details via \p Info.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Substitute the explicitly-specified template arguments
/// \p ExplicitTemplateArgs into \p FunctionTemplate, seeding \p Deduced
/// and producing the substituted parameter types in \p ParamTypes (and
/// the function type in \p FunctionType, if non-null).
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
/// The function parameter type from which deduction was performed.
QualType OriginalParamType;
/// NOTE(review): presumably whether the parameter was decomposed from an
/// initializer list during deduction -- confirm at the deduction sites.
bool DecomposedParam;
/// Index of the call argument this record describes.
unsigned ArgIdx;
/// The type of the call argument prior to deduction.
QualType OriginalArgType;
};
/// Finish function template argument deduction: check and substitute the
/// deduced arguments and produce the resulting specialization in
/// \p Specialization, optionally running \p CheckNonDependent.
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
/// Perform template argument deduction for a function template from the
/// arguments of a call, producing the called specialization.
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
/// Perform template argument deduction for a function template against
/// a target function type \p ArgFunctionType (see \p IsAddressOfFunction
/// for the address-of-function case).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Perform template argument deduction for a conversion function
/// template against the desired destination type \p ToType.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
/// Perform template argument deduction for a function template given
/// only explicitly-specified template arguments (no call arguments or
/// target type to deduce against).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
/// Deduction succeeded.
DAR_Succeeded,
/// Deduction failed; the caller may still need to diagnose.
DAR_Failed,
/// Deduction failed and a diagnostic has already been emitted.
DAR_FailedAlreadyDiagnosed
};
/// Deduce the type for an \c auto type from its initializer, storing the
/// deduced type in \p Result.
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
/// Overload of the above taking a \c TypeLoc for the \c auto type.
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
/// Diagnose a failure to deduce the type of \p VDecl from \p Init.
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
/// Deduce the (deferred) return type of \p FD, emitting diagnostics on
/// failure when \p Diagnose is set.
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
/// Deduce the template arguments of a class template specialization from
/// an initializer (class template argument deduction).
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
/// Deduce the declared type of a variable from its initializer.
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
/// Retrieve the TypeLoc of the return type of \p FD.
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
/// Deduce the function type of \p FD from the returned expression
/// \p RetExpr of a return statement, given the \c auto type \p AT.
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
/// Determine which of two function templates is more specialized under
/// the partial ordering rules (C++ [temp.func.order]).
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
/// Find the most specialized function template in the range
/// [\p SBegin, \p SEnd), emitting the given diagnostics when
/// \p Complain is set.
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
/// Determine which of two class template partial specializations is
/// more specialized.
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
/// Determine whether a class template partial specialization is more
/// specialized than the primary template.
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
/// Determine which of two variable template partial specializations is
/// more specialized.
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
/// Determine whether a variable template partial specialization is more
/// specialized than the primary template.
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
/// Determine whether the template template parameter list \p P is at
/// least as specialized as the template \p AArg.
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
/// Mark in \p Used which template parameters at depth \p Depth are used
/// (or, when \p OnlyDeduced is set, deducible) within \p TemplateArgs.
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// Mark which template parameters can be deduced from the given function
/// template; forwards to the static overload below with this Sema's
/// ASTContext.
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
/// Retrieve the template argument lists to use when instantiating the
/// definition of \p D, optionally with \p Innermost as the innermost
/// argument list.
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing.
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
/// Retrieve the template arguments for this context. Only valid for
/// kinds that carry template arguments (asserted not to be
/// DeclaringSpecialMember, whose union member is SpecialMember).
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
// The substitution index in effect before this object was constructed;
// restored by the destructor.
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
// NOTE(review): this friend declaration names 'ArgumentPackSubstitutionRAII',
// not the nested 'ArgumentPackSubstitutionIndexRAII' defined above (which,
// as a member, already has access) -- confirm whether it is vestigial.
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() will return true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
/// Tag type selecting the exception-specification constructor below.
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Tag type selecting the constraints-check constructor below.
struct ConstraintsCheck {};
/// Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Tag type selecting the constraint-substitution constructor below.
struct ConstraintSubstitution {};
/// Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, TemplateDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
/// Whether construction failed (e.g., the instantiation depth limit was
/// exceeded); queried via isInvalid().
bool Invalid;
/// Whether this specialization was already being instantiated; queried
/// via isAlreadyInstantiating().
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
/// Common constructor used by the public constructors above.
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
/// Push \p Ctx onto the stack of active code synthesis contexts.
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
/// Pop the innermost code synthesis context off the stack.
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
/// Print the current synthesis context stack, skipping the output if it
/// was already emitted at this stack depth (tracked via
/// LastEmittedCodeSynthesisContextDepth).
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
// State saved at construction and restored by the destructor.
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
// Saved value of Sema::DisableTypoCorrection, restored by the destructor.
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index,
///
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// An attribute whose instantiation has been deferred, together with the
/// state needed to instantiate it later (collected via the LateAttrs
/// out-parameter of InstantiateAttrs).
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;            // attribute from the template pattern
  LocalInstantiationScope *Scope;  // local scope captured for later use
  Decl *NewDecl;                   // instantiated decl the attribute targets
  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
    : TmplAttr(A), Scope(S), NewDecl(D)
  { }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// The kind of Objective-C container context currently being processed;
/// OCK_None indicates we are not inside any container.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build a an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Families of Objective-C methods with special semantics
/// (allocation, copying, and initializers).
enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};
/// Per-argument information for an Objective-C method declaration,
/// as passed to ActOnMethodDeclaration.
struct ObjCArgInfo {
  IdentifierInfo *Name;    // the argument's name
  SourceLocation NameLoc;  // location of the name
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;
  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
///
/// Returned by getObjCMessageKind, which also resolves the receiver type
/// for class messages.
enum ObjCMessageKind {
  /// The message is sent to 'super'.
  ObjCSuperMessage,
  /// The message is an instance message.
  ObjCInstanceMessage,
  /// The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,   // the result type is compatible
  RTC_Incompatible, // the result type is not compatible
  RTC_Unknown       // compatibility could not be determined
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
/// The alignment modes accepted by '#pragma options align', handled by
/// ActOnPragmaOptionsAlign.
enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// The reason a non-default '#pragma pack' state is being diagnosed,
/// passed to DiagnoseNonDefaultPragmaPack.
enum class PragmaPackDiagnoseKind {
  // The pack state was not the default when an include was entered.
  NonDefaultStateAtInclude,
  // The pack state changed between entering and exiting a file.
  ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
/// Which MS section pragma a stack slot belongs to
/// (see ActOnPragmaMSSeg).
enum PragmaSectionKind {
  PSK_DataSeg,  // #pragma data_seg
  PSK_BSSSeg,   // #pragma bss_seg
  PSK_ConstSeg, // #pragma const_seg
  PSK_CodeSeg,  // #pragma code_seg
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
// An invalid location is the "on" state; a valid one points at the
// active "#pragma clang optimize off".
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
// Name of the OpenCL extension currently in effect; empty when none is
// active (see setCurrentOpenCLExtension).
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
// Record the extension name; an empty string clears the current
// extension (callers of setCurrentOpenCLExtensionFor* then do nothing).
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Struct to store the context selectors info for declare variant directive.
struct OpenMPDeclareVariantCtsSelectorData {
// Context-selector set of the 'match' clause; CtxSetUnknown until a
// selector set has been parsed.
OMPDeclareVariantAttr::CtxSelectorSetType CtxSet =
OMPDeclareVariantAttr::CtxSetUnknown;
// Context selector within the set; CtxUnknown until parsed.
OMPDeclareVariantAttr::CtxSelectorType Ctx =
OMPDeclareVariantAttr::CtxUnknown;
// Vendor identifiers — presumably for the 'vendor' selector of the
// 'implementation' set; confirm against the parser that fills this in.
MutableArrayRef<StringRef> ImplVendors;
// Optional 'score(<expr>)' attached to the context selector; may be
// unset/invalid when no score was specified.
ExprResult CtxScore;
explicit OpenMPDeclareVariantCtsSelectorData() = default;
explicit OpenMPDeclareVariantCtsSelectorData(
OMPDeclareVariantAttr::CtxSelectorSetType CtxSet,
OMPDeclareVariantAttr::CtxSelectorType Ctx,
MutableArrayRef<StringRef> ImplVendors, ExprResult CtxScore)
: CtxSet(CtxSet), Ctx(Ctx), ImplVendors(ImplVendors),
CtxScore(CtxScore) {}
};
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '\#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
// DeclareTargetNestingLevel counts nested '#pragma omp declare target'
// directives; nonzero means we are lexically inside at least one.
return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(
DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param SR Source range of the directive.
/// \param Data Set of context-specific data for the specified context
/// selector.
void ActOnOpenMPDeclareVariantDirective(
FunctionDecl *FD, Expr *VariantRef, SourceRange SR,
const Sema::OpenMPDeclareVariantCtsSelectorData &Data);
/// Generic entry point for OpenMP clauses that carry a single expression
/// argument; dispatches on \p Kind to the clause-specific handlers below.
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Generic entry point for OpenMP clauses that carry a single
/// enumeration-like argument (\p Argument); dispatches on \p Kind to the
/// clause-specific handlers below.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Generic entry point for OpenMP clauses that carry a list of
/// enumeration-like arguments plus an optional expression.
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// Generic entry point for OpenMP clauses that carry no arguments;
/// dispatches on \p Kind to the clause-specific handlers below.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Generic entry point for OpenMP clauses that carry a list of variables;
/// dispatches on \p Kind to the clause-specific handlers below. The extra
/// parameters cover the needs of the individual clause kinds (reduction
/// identifiers, depend/linear/map modifiers, etc.).
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed. Passed to the cast/conversion
/// entry points (e.g. ImpCastExprToType, PerformImplicitConversion) to
/// record how the conversion was written in the source.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
/// Returns true if \p CCK describes a conversion spelled as an explicit
/// cast (C-style, functional, or other), false for implicit conversions
/// and builtin-overloaded-operator conversions.
static bool isCast(CheckedConversionKind CCK) {
switch (CCK) {
case CCK_CStyleCast:
case CCK_FunctionalCast:
case CCK_OtherCast:
return true;
default:
return false;
}
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
/// A variadic call through an ordinary function.
VariadicFunction,
/// A variadic call through a block.
VariadicBlock,
/// A variadic call through an Objective-C method.
VariadicMethod,
/// A variadic call through a constructor.
VariadicConstructor,
/// The call is not variadic.
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
/// Passing an expression of this type to a variadic function is valid.
VAK_Valid,
/// Valid only in C++11 and later.
VAK_ValidInCXX11,
/// Passing it has undefined behavior.
VAK_Undefined,
/// Undefined behavior that Microsoft-compatible modes tolerate
/// (NOTE(review): inferred from the name — verify in isValidVarArgType).
VAK_MSVCUndefined,
/// Passing it is invalid.
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
/// Determine whether the conversion from \p From to \p ToType is a
/// string-literal-to-non-const-pointer conversion.
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
/// Check whether converting \p From to \p ToType is compatible with the
/// exception specifications involved.
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
/// PerformImplicitConversion - Convert \p From to \p ToType, emitting the
/// implicit conversions required; the overloads differ in whether the
/// conversion sequence has already been computed by the caller.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// Convert \p E to type \p Ty via a qualification conversion, preserving
/// its value kind unless overridden by \p VK.
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// Convenience overload of FindCompositePointerType that accepts
/// ExprResult operands: unwraps them, delegates to the Expr*& overload,
/// and writes the (possibly converted) expressions back.
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *LHS = E1.get();
Expr *RHS = E2.get();
QualType Result = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
E1 = LHS;
E2 = RHS;
return Result;
}
/// Find a composite Objective-C pointer type for the operands of a
/// conditional expression.
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
/// Diagnose a conditional expression where one branch is a null pointer
/// constant, if warranted.
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
/// Diagnose a comparison of a pointer that is always non-null against a
/// null pointer constant.
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
/// Return the signed vector type corresponding to \p V.
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Determine whether a lax (bitcast-style) vector conversion is permitted
/// between \p srcType and \p destType, and whether one applies here.
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// Type checking declaration initializers (C99 6.7.8).
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
/// Type-check a cast of an expression with __unknown_anytype.
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
/// Build a C++ functional-style cast expression (e.g. T(expr)).
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
/// Result of checking an ARC conversion: fine as-is, contains an
/// unbridged cast that may still be fixed up, or an error was diagnosed.
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
/// Remove (or diagnose) an unbridged cast left in place by a prior
/// ARC conversion check.
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
/// Check a conversion involving a weak-unavailable type under ARC.
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// Bundles the result of processing a statement condition: the optional
/// condition variable, the converted condition expression, and — for
/// constexpr conditions — its compile-time value.
class ConditionResult {
// Variable declared inside the condition, or null if there is none.
Decl *ConditionVar;
// The (full-expression) condition itself.
FullExprArg Condition;
// True if the condition could not be processed.
bool Invalid;
// True when the condition's value was evaluated at compile time; only
// attempted for constexpr conditions whose expression is not
// value-dependent.
bool HasKnownValue;
// The evaluated boolean value; meaningful only if HasKnownValue.
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
/// Default-constructs a valid but empty result.
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
/// Returns the condition variable (may be null) together with the
/// condition expression.
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
/// Returns the compile-time value of the condition, or None if it was
/// not evaluated as a constant.
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
/// Convenience factory for an invalid ConditionResult.
static ConditionResult ConditionError() { return ConditionResult(true); }
/// The syntactic context a condition appears in; passed to ActOnCondition
/// and related entry points below.
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
// NOTE(review): presumably suppresses diagnostic emission when true —
// confirm against VerifyIntegerConstantExpression's implementation.
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
/// Diagnose that the expression is not an integer constant expression.
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
/// Diagnose the folded-but-not-ICE case (see the AllowFold parameter of
/// VerifyIntegerConstantExpression below).
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before incrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
// The canonical declaration of the function.
CanonicalDeclPtr<FunctionDecl> FD;
// The source location paired with the function (e.g. a call site; see
// LocsWithCUDACallDiags and DeviceKnownEmittedFns below).
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
/// \p K selects the behavior described above; \p Fn is the function a
/// deferred diagnostic would be attached to.
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
/// Streams \p Value into whichever diagnostic is active: the immediate
/// one, or the deferred one stored in S.DeviceDeferredDiags.
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
// Location and ID of the diagnostic being built.
SourceLocation Loc;
unsigned DiagID;
// Function a deferred diagnostic is keyed on in S.DeviceDeferredDiags.
FunctionDecl *Fn;
// Whether a call stack should accompany the diagnostic (the
// K_ImmediateWithCallStack and K_Deferred kinds).
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
/// Target kind of a CUDA function, as classified by IdentifyCUDATarget
/// below (which returns CFT_Host for a null decl).
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
  // A non-function context yields a null decl here, which
  // IdentifyCUDATarget documents as classifying to CFT_Host.
  FunctionDecl *FD = dyn_cast<FunctionDecl>(CurContext);
  return IdentifyCUDATarget(FD);
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
/// Relative preference of a caller/callee pairing, as produced by
/// IdentifyCUDAPreference below; ordered from worst (CFP_Never) to best
/// (CFP_Native).
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
QualType BaseType, QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
// Index of the format-string argument (cf. format_idx in
// CheckFormatArguments below).
unsigned FormatIdx;
// Index of the first data argument checked against the format string.
unsigned FirstDataArg;
// True if the callee takes a va_list rather than variadic arguments.
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
/// The family of format string a FormatAttr describes (printf-like,
/// scanf-like, etc.); see GetFormatStringType below.
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
/// Metadata associated with a registered type-tag magic value: the expected
/// type plus how the tagged argument must relate to it.
struct TypeTagData {
  /// Default-construct with defined flag values. (Previously the two
  /// bitfields were left uninitialized, which is unsafe if an instance is
  /// copied or hashed before being fully filled in.)
  TypeTagData() : LayoutCompatible(false), MustBeNull(false) {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
      Type(Type), LayoutCompatible(LayoutCompatible),
      MustBeNull(MustBeNull)
  {}

  /// The type expected for the tagged argument.
  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;

  /// NOTE(review): presumably requires the tagged argument to be a null
  /// pointer — confirm against CheckArgumentWithTypeTag.
  unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

/// Bump the Microsoft-ABI mangling number tracked by the current scope.
/// Forwards directly to Scope::incrementMSManglingNumber().
void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
/// The lexical DeclContext to use for semantic checks: prefers the
/// original lexical context when one has been recorded (non-null),
/// otherwise falls back to the current semantic context.
DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
/// Like getCurLexicalContext(), but when the current context is an
/// Objective-C category, returns the class interface the category extends
/// instead of the category itself.
const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}
/// Check whether the number of arguments passed to a function exceeds the
/// number of parameters it expects.
/// \param PartialOverloading during code completion, right after a comma,
///        the comma is treated as introducing one extra (not yet written)
///        argument.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  const size_t EffectiveArgs =
      (PartialOverloading && NumArgs > 0) ? NumArgs + 1 : NumArgs;
  return EffectiveArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
/// RAII helper that stashes Sema's pending delayed exception-spec checks and
/// delayed DLL-export classes on construction (via swap) and restores them on
/// destruction. The destructor asserts that the temporarily-live lists were
/// fully drained in between.
class SavePendingParsedClassStateRAII {
public:
  SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

  ~SavePendingParsedClassStateRAII() {
    assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedDllExportClasses.empty() &&
           "there shouldn't be any pending delayed DLL export classes");
    swapSavedState();
  }

private:
  Sema &S;

  // Saved copies of the three delayed-work lists; same types as the members
  // they shadow.
  decltype(DelayedOverridingExceptionSpecChecks)
      SavedOverridingExceptionSpecChecks;
  decltype(DelayedEquivalentExceptionSpecChecks)
      SavedEquivalentExceptionSpecChecks;
  decltype(DelayedDllExportClasses) SavedDllExportClasses;

  /// Exchange the saved lists with Sema's live ones; called symmetrically
  /// from the constructor and destructor.
  void swapSavedState() {
    SavedOverridingExceptionSpecChecks.swap(
        S.DelayedOverridingExceptionSpecChecks);
    SavedEquivalentExceptionSpecChecks.swap(
        S.DelayedEquivalentExceptionSpecChecks);
    SavedDllExportClasses.swap(S.DelayedDllExportClasses);
  }
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
  Expr *E;             // Expression designating the member access.
  RecordDecl *RD;      // Record that declares the member.
  ValueDecl *MD;       // The member being accessed.
  CharUnits Alignment; // Recorded alignment for the diagnostic — presumably
                       // the alignment available at the access site; confirm
                       // at AddPotentialMisalignedMembers call sites.

  MisalignedMember() : E(), RD(), MD(), Alignment() {}
  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  // Identity is determined by the expression alone; the remaining fields are
  // payload and do not participate in equality.
  bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the sef of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // True when a context was actually pushed and must be popped on
  // destruction.
  bool Entered = true;

public:
  /// Push \p NewContext unless \p ShouldEnter is false.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  /// Push \p NewContext, reusing the enclosing lambda context declaration.
  /// This overload always enters (Entered keeps its default of true).
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };

  /// Enter an UnevaluatedList context for a braced-init-list, but only when
  /// currently inside an unevaluated C++11 (or later) context.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  /// Pop the evaluation context if one was pushed.
  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  /// The saved token stream of the template body, replayed when the
  /// declaration is finally parsed.
  CachedTokens Toks;

  /// The template function declaration to be late parsed.
  Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  // Empty/tombstone keys delegate to the canonical-FunctionDecl traits,
  // paired with a default-constructed SourceLocation.
  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  // Hash combines the declaration's hash with the raw location encoding.
  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  // Two keys are equal only when both the declaration and location match.
  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm
#endif
|
assign_scalar_variable_to_conditions_process.h | //
// | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Josep Maria Carbonell
//
#if !defined(KRATOS_ASSIGN_SCALAR_VARIABLE_TO_CONDITIONS_PROCESS_H_INCLUDED )
#define KRATOS_ASSIGN_SCALAR_VARIABLE_TO_CONDITIONS_PROCESS_H_INCLUDED
// System includes
// External includes
// Project includes
#include "includes/model_part.h"
#include "includes/kratos_parameters.h"
#include "processes/process.h"
namespace Kratos
{
///@name Kratos Classes
///@{
/// The base class for assigning a value to scalar variables or array_1d components processes in Kratos.
/** This function assigns a value to a variable belonging to all of the nodes in a given mesh
*/
class AssignScalarVariableToConditionsProcess : public Process
{
public:
///@name Type Definitions
///@{
typedef VariableComponent<VectorComponentAdaptor<array_1d<double, 3> > > array_1d_component_type;
/// Pointer definition of AssignScalarVariableToConditionsProcess
KRATOS_CLASS_POINTER_DEFINITION(AssignScalarVariableToConditionsProcess);
///@}
///@name Life Cycle
///@{
AssignScalarVariableToConditionsProcess(
ModelPart& rModelPart,
Parameters rParameters
) : Process(Flags()) ,
mrModelPart(rModelPart)
{
KRATOS_TRY
Parameters default_parameters( R"(
{
"model_part_name":"MODEL_PART_NAME",
"mesh_id": 0,
"variable_name": "VARIABLE_NAME",
"value" : 1.0
} )" );
// Validate against defaults -- this ensures no type mismatch
rParameters.ValidateAndAssignDefaults(default_parameters);
mmesh_id = rParameters["mesh_id"].GetInt();
mvariable_name = rParameters["variable_name"].GetString();
if( KratosComponents< Variable<double> >::Has( mvariable_name )) //case of double variable
{
mdouble_value = rParameters["value"].GetDouble();
}
else if( KratosComponents<array_1d_component_type>::Has( mvariable_name ) ) //case of component variable
{
mdouble_value = rParameters["value"].GetDouble();
}
else if( KratosComponents< Variable<int> >::Has( mvariable_name ) ) //case of int variable
{
mint_value = rParameters["value"].GetInt();
}
else if( KratosComponents< Variable<bool> >::Has( mvariable_name ) ) //case of bool variable
{
mbool_value = rParameters["value"].GetBool();
}
else
{
KRATOS_ERROR <<"Trying to set a variable that is not in the model_part - variable name is " << mvariable_name << std::endl;
}
KRATOS_CATCH("");
}
/// Destructor.
~AssignScalarVariableToConditionsProcess() override {}
///@}
///@name Operators
///@{
/// This operator is provided to call the process as a function and simply calls the Execute method.
void operator()()
{
Execute();
}
///@}
///@name Operations
///@{
/// Execute method is used to execute the AssignScalarVariableToConditionsProcess algorithms.
void Execute() override
{
KRATOS_TRY;
if( KratosComponents< Variable<double> >::Has( mvariable_name )) //case of double variable
{
InternalAssignValue<>(KratosComponents< Variable<double> >::Get(mvariable_name), mdouble_value);
}
else if( KratosComponents<array_1d_component_type>::Has( mvariable_name ) ) //case of component variable
{
InternalAssignValueSerial<>(KratosComponents<array_1d_component_type>::Get(mvariable_name), mdouble_value);
}
else if( KratosComponents< Variable<int> >::Has( mvariable_name ) ) //case of int variable
{
InternalAssignValue<>(KratosComponents< Variable<int> >::Get(mvariable_name) , mint_value);
}
else if( KratosComponents< Variable<bool> >::Has( mvariable_name ) ) //case of bool variable
{
InternalAssignValue<>(KratosComponents< Variable<bool> >::Get(mvariable_name), mbool_value);
}
else
{
KRATOS_ERROR << "Not able to set the variable. Attempting to set variable: " << mvariable_name << std::endl;
}
KRATOS_CATCH("");
}
void ExecuteInitializeSolutionStep() override
{
Execute();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "AssignScalarVariableToConditionsProcess";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "AssignScalarVariableToConditionsProcess";
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
/// Copy constructor.
AssignScalarVariableToConditionsProcess(AssignScalarVariableToConditionsProcess const& rOther);
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
ModelPart& mrModelPart;
std::string mvariable_name;
// double mdouble_value;
double mdouble_value;
int mint_value;
bool mbool_value;
std::size_t mmesh_id;
///@}
///@name Private Operators
///@{
template< class TVarType, class TDataType >
void InternalAssignValue(TVarType& rVar, const TDataType value)
{
const int nconditions = mrModelPart.GetMesh(mmesh_id).Conditions().size();
if(nconditions != 0)
{
ModelPart::ConditionsContainerType::iterator it_begin = mrModelPart.GetMesh(mmesh_id).ConditionsBegin();
#pragma omp parallel for
for(int i = 0; i<nconditions; i++)
{
ModelPart::ConditionsContainerType::iterator it = it_begin + i;
it->SetValue(rVar, value);
}
}
}
template< class TVarType, class TDataType >
void InternalAssignValueSerial(TVarType& rVar, const TDataType value)
{
const int nconditions = mrModelPart.GetMesh(mmesh_id).Conditions().size();
if(nconditions != 0)
{
ModelPart::ConditionsContainerType::iterator it_begin = mrModelPart.GetMesh(mmesh_id).ConditionsBegin();
for(int i = 0; i<nconditions; i++)
{
ModelPart::ConditionsContainerType::iterator it = it_begin + i;
it->SetValue(rVar, value);
}
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
/// Assignment operator.
AssignScalarVariableToConditionsProcess& operator=(AssignScalarVariableToConditionsProcess const& rOther);
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class AssignScalarVariableToConditionsProcess
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
inline std::istream& operator >> (std::istream& rIStream,
AssignScalarVariableToConditionsProcess& rThis);
/// output stream function
inline std::ostream& operator << (std::ostream& rOStream,
const AssignScalarVariableToConditionsProcess& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_ASSIGN_SCALAR_VARIABLE_TO_CONDITIONS_PROCESS_H_INCLUDED defined
|
weighted_sptree.h | /*
*
* Copyright (c) 2014, Laurens van der Maaten (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY LAURENS VAN DER MAATEN ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL LAURENS VAN DER MAATEN BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
/*
*
* Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef WEIGHTED_SPTREE_H
#define WEIGHTED_SPTREE_H
#include <iostream>
#include <vector>
#include <unordered_map>
#ifdef __USE_GCD__
#include <dispatch/dispatch.h>
#endif
namespace hdi{
namespace dr{
//! Sparse Partitioning Tree used for the Barnes Hut approximation
/*!
Sparse Partitioning Tree used for the Barnes Hut approximation.
The original version was implemented by Laurens van der Maaten,
\author Laurens van der Maaten
\author Nicola Pezzotti
*/
template <typename scalar_type>
class WeightedSPTree{
public:
typedef double hp_scalar_type;
private:
class Cell {
unsigned int _emb_dimension;
hp_scalar_type* corner;
hp_scalar_type* width;
public:
Cell(unsigned int emb_dimension);
Cell(unsigned int emb_dimension, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
~Cell();
hp_scalar_type getCorner(unsigned int d);
hp_scalar_type getWidth(unsigned int d);
void setCorner(unsigned int d, hp_scalar_type val);
void setWidth(unsigned int d, hp_scalar_type val);
bool containsPoint(scalar_type point[]);
};
// Fixed constants
static const unsigned int QT_NODE_CAPACITY = 1;
// A buffer we use when doing force computations
//hp_scalar_type* buff;
// Properties of this node in the tree
WeightedSPTree* parent;
unsigned int _emb_dimension;
bool is_leaf;
unsigned int size;
hp_scalar_type cum_size;
// Axis-aligned bounding box stored as a center with half-_emb_dimensions to represent the boundaries of this quad tree
Cell* boundary;
// Indices in this space-partitioning tree node, corresponding center-of-mass, and list of all children
scalar_type* _emb_positions;
const scalar_type* _weights;
hp_scalar_type* _center_of_mass;
unsigned int index[QT_NODE_CAPACITY];
// Children
WeightedSPTree** children;
unsigned int no_children;
public:
WeightedSPTree(unsigned int D, scalar_type* inp_data, const scalar_type* weights, unsigned int N);
private:
WeightedSPTree(unsigned int D, scalar_type* inp_data, const scalar_type* weights, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
WeightedSPTree(unsigned int D, scalar_type* inp_data, const scalar_type* weights, unsigned int N, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
WeightedSPTree(WeightedSPTree* inp_parent, unsigned int D, scalar_type* inp_data, const scalar_type* weights, unsigned int N, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
WeightedSPTree(WeightedSPTree* inp_parent, unsigned int D, scalar_type* inp_data, const scalar_type* weights, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
public:
~WeightedSPTree();
void setData(scalar_type* inp_data, const scalar_type* weights);
WeightedSPTree* getParent();
bool insert(unsigned int new_index);
void subdivide();
bool isCorrect();
void getAllIndices(unsigned int* indices);
unsigned int getDepth();
void computeNonEdgeForces(unsigned int point_index, hp_scalar_type theta, hp_scalar_type neg_f[], hp_scalar_type& sum_Q)const;
template <class sparse_scalar_matrix_type>
void computeEdgeForces(const sparse_scalar_matrix_type& matrix, hp_scalar_type multiplier, hp_scalar_type* pos_f)const;
void print();
private:
void init(WeightedSPTree* inp_parent, unsigned int D, scalar_type* inp_data, const scalar_type* weights, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
void fill(unsigned int N);
unsigned int getAllIndices(unsigned int* indices, unsigned int loc);
};
/////////////////////////////////////////////////////////////////
    // Accumulates the edge (attractive) forces of the embedding into pos_f.
    // For each edge (j, elem.first) with similarity p_ij = elem.second, the
    // contribution is proportional to p_ij / (1 + |yi - yj|^2) * (yi - yj).
    //
    // NOTE(review): `multiplier` is applied twice — once inside `res` and
    // again when accumulating into pos_f. The trailing comment
    // "(p_ij*q_j*mult) * (yi-yj)" suggests a single multiplication was
    // intended; confirm against the callers before changing.
    template <typename scalar_type>
    template <class sparse_scalar_matrix_type>
    void WeightedSPTree<scalar_type>::computeEdgeForces(const sparse_scalar_matrix_type& sparse_matrix, hp_scalar_type multiplier, hp_scalar_type* pos_f)const{
      const int n = sparse_matrix.size();

      // Loop over all edges in the graph (GCD on Apple, OpenMP elsewhere).
#ifdef __USE_GCD__
      std::cout << "GCD dispatch, weighted_sptree 176.\n";
      dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
      #pragma omp parallel for
      for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
        // Per-iteration scratch buffer: safe under parallel execution.
        std::vector<hp_scalar_type> buff(_emb_dimension,0);
        unsigned int ind1, ind2;
        hp_scalar_type q_ij_1;
        ind1 = j * _emb_dimension;
        for(auto elem: sparse_matrix[j]) {
          // Compute pairwise distance and Q-value
          q_ij_1 = 1.0;
          ind2 = elem.first * _emb_dimension;
          for(unsigned int d = 0; d < _emb_dimension; d++)
            buff[d] = _emb_positions[ind1 + d] - _emb_positions[ind2 + d]; //buff contains (yi-yj) per each _emb_dimension
          for(unsigned int d = 0; d < _emb_dimension; d++)
            q_ij_1 += buff[d] * buff[d];
          hp_scalar_type p_ij = elem.second;
          hp_scalar_type res = hp_scalar_type(p_ij) * multiplier / q_ij_1 / n;
          // Sum positive force
          for(unsigned int d = 0; d < _emb_dimension; d++)
            pos_f[ind1 + d] += res * buff[d] * multiplier; //(p_ij*q_j*mult) * (yi-yj)
        }
      }
#ifdef __USE_GCD__
      );
#endif
    }
}
}
#endif
|
trmv_x_sky_u_hi_trans.c | #include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Triangular matrix-vector product for a skyline (SKY) stored matrix:
 *   y := beta * y + alpha * op(A) * x
 * "u" (unit-diagonal) variant: the last element of each skyline section is
 * taken as the diagonal and contributes alpha * x[c] directly, ignoring the
 * stored value.
 *
 * NOTE(review): which triangle/op combination this realizes ("hi"/"trans"
 * per the file name) depends on whether A->pointers indexes rows or columns
 * of the skyline storage — confirm against the ALPHA_SPMAT_SKY definition.
 */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
                                      const ALPHA_SPMAT_SKY *A,
                                      const ALPHA_Number *x,
                                      const ALPHA_Number beta,
                                      ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    /* Kernel supports square matrices only. */
    if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    const ALPHA_INT thread_num = alpha_get_thread_num();

    /* First pass: scale the output, y[i] = beta * y[i]. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);
    }

    /* Second pass: accumulate the alpha-scaled products. Each iteration
     * writes only y[r], so the loop is race-free under OpenMP. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT r = 0; r < m; ++r)
    {
        /* pointers[r]..pointers[r+1] delimits the skyline section of r. */
        const ALPHA_INT row_start = A->pointers[r];
        const ALPHA_INT row_end = A->pointers[r + 1];
        ALPHA_INT row_indx = 1;
        for(ALPHA_INT i = row_start; i < row_end; i++)
        {
            /* Index of the i-th stored element within the section:
             * c runs from r - (section length) + 1 up to r. */
            ALPHA_INT row_eles = row_end - row_start;
            ALPHA_INT c = r - row_eles + row_indx;
            if(i == row_end - 1)
            {
                /* Last element of the section: the (unit) diagonal, so the
                 * stored value is ignored and alpha * x[c] is added. */
                alpha_madde(y[r], alpha, x[c]);
            }
            else
            {
                ALPHA_Number t;
                alpha_mul(t, alpha, A->values[i]);
                alpha_madde(y[r], t, x[c]);
            }
            row_indx ++;
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point for this kernel: currently always dispatches to the
 * OpenMP-parallel implementation above. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A,
      const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y)
{
    return ONAME_omp(alpha, A, x, beta, y);
}
|
convolution_1x1_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt, int activation_type, const Mat& activation_params)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(8, inch, size/8 + (size%8)/4 + (size%4)/2 + size%2, elemsize, elempack, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i*4;
float* tmpptr = tmp.channel(i/8);
for (int q=0; q<inch; q++)
{
float32x4_t _r0 = vld1q_f32(img0);
float32x4_t _r1 = vld1q_f32(img0+4);
float32x4_t _r2 = vld1q_f32(img0+8);
float32x4_t _r3 = vld1q_f32(img0+12);
float32x4_t _r4 = vld1q_f32(img0+16);
float32x4_t _r5 = vld1q_f32(img0+20);
float32x4_t _r6 = vld1q_f32(img0+24);
float32x4_t _r7 = vld1q_f32(img0+28);
vst1q_f32(tmpptr, _r0);
vst1q_f32(tmpptr+4, _r1);
vst1q_f32(tmpptr+8, _r2);
vst1q_f32(tmpptr+12, _r3);
vst1q_f32(tmpptr+16, _r4);
vst1q_f32(tmpptr+20, _r5);
vst1q_f32(tmpptr+24, _r6);
vst1q_f32(tmpptr+28, _r7);
tmpptr += 32;
img0 += bottom_blob.cstep * 4;
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i*4;
float* tmpptr = tmp.channel(i/8 + (i%8)/4);
for (int q=0; q<inch; q++)
{
float32x4_t _r0 = vld1q_f32(img0);
float32x4_t _r1 = vld1q_f32(img0+4);
float32x4_t _r2 = vld1q_f32(img0+8);
float32x4_t _r3 = vld1q_f32(img0+12);
vst1q_f32(tmpptr, _r0);
vst1q_f32(tmpptr+4, _r1);
vst1q_f32(tmpptr+8, _r2);
vst1q_f32(tmpptr+12, _r3);
tmpptr += 16;
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const float* img0 = bottom_blob.channel(0);
img0 += i*4;
float* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2);
for (int q=0; q<inch; q++)
{
float32x4_t _r0 = vld1q_f32(img0);
float32x4_t _r1 = vld1q_f32(img0+4);
vst1q_f32(tmpptr, _r0);
vst1q_f32(tmpptr+4, _r1);
tmpptr += 8;
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i*4;
float* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2 + i%2);
for (int q=0; q<inch; q++)
{
float32x4_t _r0 = vld1q_f32(img0);
vst1q_f32(tmpptr, _r0);
tmpptr += 4;
img0 += bottom_blob.cstep * 4;
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
float32x4_t _bias0 = bias ? vld1q_f32(bias + p * 4) : vdupq_n_f32(0.f);
float32x4_t _bias1 = bias ? vld1q_f32(bias + (p+1) * 4) : vdupq_n_f32(0.f);
float* outptr0 = out0;
float* outptr1 = out1;
int i=0;
// Process 8 pixels at a time for output channels p and p+1.
for (; i+7<size; i+=8)
{
    const float* tmpptr = tmp.channel(i/8);

    // Partial sums for 8 pixels x 2 output channels, seeded with the bias.
    float32x4_t _sum_0[8]; // output channel p
    float32x4_t _sum_1[8]; // output channel p+1
    for (int j = 0; j < 8; j++)
    {
        _sum_0[j] = _bias0;
        _sum_1[j] = _bias1;
    }

    const float* kptr0 = (const float*)kernel + p * inch * 16;
    const float* kptr1 = (const float*)kernel + (p+1) * inch * 16;

    for (int q = 0; q < inch; q++)
    {
        // 4x4 weight tile for each of the two output channels
        float32x4_t _w0_0 = vld1q_f32( kptr0 );
        float32x4_t _w1_0 = vld1q_f32( kptr0 + 4 );
        float32x4_t _w2_0 = vld1q_f32( kptr0 + 8 );
        float32x4_t _w3_0 = vld1q_f32( kptr0 + 12 );
        float32x4_t _w0_1 = vld1q_f32( kptr1 );
        float32x4_t _w1_1 = vld1q_f32( kptr1 + 4 );
        float32x4_t _w2_1 = vld1q_f32( kptr1 + 8 );
        float32x4_t _w3_1 = vld1q_f32( kptr1 + 12 );

        for (int j = 0; j < 8; j++)
        {
            float32x4_t _r = vld1q_f32( tmpptr + j * 4 );
            _sum_0[j] = vmlaq_laneq_f32(_sum_0[j], _w0_0, _r, 0);
            _sum_0[j] = vmlaq_laneq_f32(_sum_0[j], _w1_0, _r, 1);
            _sum_0[j] = vmlaq_laneq_f32(_sum_0[j], _w2_0, _r, 2);
            _sum_0[j] = vmlaq_laneq_f32(_sum_0[j], _w3_0, _r, 3);
            _sum_1[j] = vmlaq_laneq_f32(_sum_1[j], _w0_1, _r, 0);
            _sum_1[j] = vmlaq_laneq_f32(_sum_1[j], _w1_1, _r, 1);
            _sum_1[j] = vmlaq_laneq_f32(_sum_1[j], _w2_1, _r, 2);
            _sum_1[j] = vmlaq_laneq_f32(_sum_1[j], _w3_1, _r, 3);
        }

        tmpptr += 32;
        kptr0 += 16;
        kptr1 += 16;
    }

    if (activation_type == 1)
    {
        // ReLU
        float32x4_t _zero = vdupq_n_f32(0.f);
        for (int j = 0; j < 8; j++)
        {
            _sum_0[j] = vmaxq_f32(_sum_0[j], _zero);
            _sum_1[j] = vmaxq_f32(_sum_1[j], _zero);
        }
    }
    else if (activation_type == 2)
    {
        // Leaky ReLU: x <= 0 ? x * slope : x
        float32x4_t _zero = vdupq_n_f32(0.f);
        float32x4_t _slope = vdupq_n_f32(activation_params[0]);
        for (int j = 0; j < 8; j++)
        {
            _sum_0[j] = vbslq_f32(vcleq_f32(_sum_0[j], _zero), vmulq_f32(_sum_0[j], _slope), _sum_0[j]);
            _sum_1[j] = vbslq_f32(vcleq_f32(_sum_1[j], _zero), vmulq_f32(_sum_1[j], _slope), _sum_1[j]);
        }
    }
    else if (activation_type == 3)
    {
        // Clip to [min, max]
        float32x4_t _min = vdupq_n_f32(activation_params[0]);
        float32x4_t _max = vdupq_n_f32(activation_params[1]);
        for (int j = 0; j < 8; j++)
        {
            _sum_0[j] = vminq_f32(vmaxq_f32(_sum_0[j], _min), _max);
            _sum_1[j] = vminq_f32(vmaxq_f32(_sum_1[j], _min), _max);
        }
    }
    else if (activation_type == 4)
    {
        // Sigmoid: 1 / (1 + exp(-x)).
        // BUGFIX: the previous unrolled code negated and exponentiated
        // _sum0_0.._sum3_0 and _sum4_1.._sum7_1 TWICE (double negate is a
        // no-op; double exp yields exp(exp(x))) and never negated or
        // exponentiated _sum4_0.._sum7_0 and _sum0_1.._sum3_1 at all.
        // Every sum now gets exactly one negate and one exp_ps.
        float32x4_t _one = vdupq_n_f32(1.f);
        for (int j = 0; j < 8; j++)
        {
            float32x4_t _den0 = vaddq_f32(exp_ps(vnegq_f32(_sum_0[j])), _one);
            float32x4_t _den1 = vaddq_f32(exp_ps(vnegq_f32(_sum_1[j])), _one);
            // reciprocal estimate + one Newton-Raphson refinement step
            float32x4_t _rcp0 = vrecpeq_f32(_den0);
            float32x4_t _rcp1 = vrecpeq_f32(_den1);
            _sum_0[j] = vmulq_f32(vrecpsq_f32(_den0, _rcp0), _rcp0);
            _sum_1[j] = vmulq_f32(vrecpsq_f32(_den1, _rcp1), _rcp1);
        }
    }

    for (int j = 0; j < 8; j++)
    {
        vst1q_f32(outptr0 + j * 4, _sum_0[j]);
        vst1q_f32(outptr1 + j * 4, _sum_1[j]);
    }
    outptr0 += 32;
    outptr1 += 32;
}
// Process 4 pixels at a time for output channels p and p+1.
for (; i+3<size; i+=4)
{
    const float* tmpptr = tmp.channel(i/8 + (i%8)/4);

    // Partial sums for 4 pixels x 2 output channels, seeded with the bias.
    float32x4_t _sum_0[4]; // output channel p
    float32x4_t _sum_1[4]; // output channel p+1
    for (int j = 0; j < 4; j++)
    {
        _sum_0[j] = _bias0;
        _sum_1[j] = _bias1;
    }

    const float* kptr0 = (const float*)kernel + p * inch * 16;
    const float* kptr1 = (const float*)kernel + (p+1) * inch * 16;

    for (int q = 0; q < inch; q++)
    {
        // 4x4 weight tile for each of the two output channels
        float32x4_t _w0_0 = vld1q_f32( kptr0 );
        float32x4_t _w1_0 = vld1q_f32( kptr0 + 4 );
        float32x4_t _w2_0 = vld1q_f32( kptr0 + 8 );
        float32x4_t _w3_0 = vld1q_f32( kptr0 + 12 );
        float32x4_t _w0_1 = vld1q_f32( kptr1 );
        float32x4_t _w1_1 = vld1q_f32( kptr1 + 4 );
        float32x4_t _w2_1 = vld1q_f32( kptr1 + 8 );
        float32x4_t _w3_1 = vld1q_f32( kptr1 + 12 );

        for (int j = 0; j < 4; j++)
        {
            float32x4_t _r = vld1q_f32( tmpptr + j * 4 );
            _sum_0[j] = vmlaq_laneq_f32(_sum_0[j], _w0_0, _r, 0);
            _sum_0[j] = vmlaq_laneq_f32(_sum_0[j], _w1_0, _r, 1);
            _sum_0[j] = vmlaq_laneq_f32(_sum_0[j], _w2_0, _r, 2);
            _sum_0[j] = vmlaq_laneq_f32(_sum_0[j], _w3_0, _r, 3);
            _sum_1[j] = vmlaq_laneq_f32(_sum_1[j], _w0_1, _r, 0);
            _sum_1[j] = vmlaq_laneq_f32(_sum_1[j], _w1_1, _r, 1);
            _sum_1[j] = vmlaq_laneq_f32(_sum_1[j], _w2_1, _r, 2);
            _sum_1[j] = vmlaq_laneq_f32(_sum_1[j], _w3_1, _r, 3);
        }

        tmpptr += 16;
        kptr0 += 16;
        kptr1 += 16;
    }

    if (activation_type == 1)
    {
        // ReLU
        float32x4_t _zero = vdupq_n_f32(0.f);
        for (int j = 0; j < 4; j++)
        {
            _sum_0[j] = vmaxq_f32(_sum_0[j], _zero);
            _sum_1[j] = vmaxq_f32(_sum_1[j], _zero);
        }
    }
    else if (activation_type == 2)
    {
        // Leaky ReLU: x <= 0 ? x * slope : x
        float32x4_t _zero = vdupq_n_f32(0.f);
        float32x4_t _slope = vdupq_n_f32(activation_params[0]);
        for (int j = 0; j < 4; j++)
        {
            _sum_0[j] = vbslq_f32(vcleq_f32(_sum_0[j], _zero), vmulq_f32(_sum_0[j], _slope), _sum_0[j]);
            _sum_1[j] = vbslq_f32(vcleq_f32(_sum_1[j], _zero), vmulq_f32(_sum_1[j], _slope), _sum_1[j]);
        }
    }
    else if (activation_type == 3)
    {
        // Clip to [min, max]
        float32x4_t _min = vdupq_n_f32(activation_params[0]);
        float32x4_t _max = vdupq_n_f32(activation_params[1]);
        for (int j = 0; j < 4; j++)
        {
            _sum_0[j] = vminq_f32(vmaxq_f32(_sum_0[j], _min), _max);
            _sum_1[j] = vminq_f32(vmaxq_f32(_sum_1[j], _min), _max);
        }
    }
    else if (activation_type == 4)
    {
        // Sigmoid: 1 / (1 + exp(-x)).
        // BUGFIX: the old code's Newton-Raphson step for _outp2_0/_outp3_0
        // (and _outp2_1/_outp3_1) passed the WRONG estimate to vrecpsq_f32
        // (_outp0_0/_outp1_0 instead of _outp2_0/_outp3_0), so those lanes
        // did not get a proper refinement of 1/(1+exp(-x)).
        float32x4_t _one = vdupq_n_f32(1.f);
        for (int j = 0; j < 4; j++)
        {
            float32x4_t _den0 = vaddq_f32(exp_ps(vnegq_f32(_sum_0[j])), _one);
            float32x4_t _den1 = vaddq_f32(exp_ps(vnegq_f32(_sum_1[j])), _one);
            // reciprocal estimate + one Newton-Raphson refinement step
            float32x4_t _rcp0 = vrecpeq_f32(_den0);
            float32x4_t _rcp1 = vrecpeq_f32(_den1);
            _sum_0[j] = vmulq_f32(vrecpsq_f32(_den0, _rcp0), _rcp0);
            _sum_1[j] = vmulq_f32(vrecpsq_f32(_den1, _rcp1), _rcp1);
        }
    }

    for (int j = 0; j < 4; j++)
    {
        vst1q_f32(outptr0 + j * 4, _sum_0[j]);
        vst1q_f32(outptr1 + j * 4, _sum_1[j]);
    }
    outptr0 += 16;
    outptr1 += 16;
}
// Process 2 pixels at a time for output channels p and p+1.
for (; i+1<size; i+=2)
{
    const float* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2);

    // Two pixel accumulators per output channel, seeded with the bias.
    float32x4_t _acc_0[2] = { _bias0, _bias0 }; // output channel p
    float32x4_t _acc_1[2] = { _bias1, _bias1 }; // output channel p+1

    const float* kptr0 = (const float*)kernel + p * inch * 16;
    const float* kptr1 = (const float*)kernel + (p+1) * inch * 16;

    for (int q = 0; q < inch; q++)
    {
        // 4x4 weight tile for each of the two output channels
        float32x4_t _w0_0 = vld1q_f32( kptr0 );
        float32x4_t _w1_0 = vld1q_f32( kptr0 + 4 );
        float32x4_t _w2_0 = vld1q_f32( kptr0 + 8 );
        float32x4_t _w3_0 = vld1q_f32( kptr0 + 12 );
        float32x4_t _w0_1 = vld1q_f32( kptr1 );
        float32x4_t _w1_1 = vld1q_f32( kptr1 + 4 );
        float32x4_t _w2_1 = vld1q_f32( kptr1 + 8 );
        float32x4_t _w3_1 = vld1q_f32( kptr1 + 12 );

        for (int j = 0; j < 2; j++)
        {
            float32x4_t _r = vld1q_f32( tmpptr + j * 4 );
            _acc_0[j] = vmlaq_laneq_f32(_acc_0[j], _w0_0, _r, 0);
            _acc_0[j] = vmlaq_laneq_f32(_acc_0[j], _w1_0, _r, 1);
            _acc_0[j] = vmlaq_laneq_f32(_acc_0[j], _w2_0, _r, 2);
            _acc_0[j] = vmlaq_laneq_f32(_acc_0[j], _w3_0, _r, 3);
            _acc_1[j] = vmlaq_laneq_f32(_acc_1[j], _w0_1, _r, 0);
            _acc_1[j] = vmlaq_laneq_f32(_acc_1[j], _w1_1, _r, 1);
            _acc_1[j] = vmlaq_laneq_f32(_acc_1[j], _w2_1, _r, 2);
            _acc_1[j] = vmlaq_laneq_f32(_acc_1[j], _w3_1, _r, 3);
        }

        tmpptr += 8;
        kptr0 += 16;
        kptr1 += 16;
    }

    if (activation_type == 1)
    {
        // ReLU
        float32x4_t _zero = vdupq_n_f32(0.f);
        for (int j = 0; j < 2; j++)
        {
            _acc_0[j] = vmaxq_f32(_acc_0[j], _zero);
            _acc_1[j] = vmaxq_f32(_acc_1[j], _zero);
        }
    }
    else if (activation_type == 2)
    {
        // Leaky ReLU: x <= 0 ? x * slope : x
        float32x4_t _zero = vdupq_n_f32(0.f);
        float32x4_t _slope = vdupq_n_f32(activation_params[0]);
        for (int j = 0; j < 2; j++)
        {
            _acc_0[j] = vbslq_f32(vcleq_f32(_acc_0[j], _zero), vmulq_f32(_acc_0[j], _slope), _acc_0[j]);
            _acc_1[j] = vbslq_f32(vcleq_f32(_acc_1[j], _zero), vmulq_f32(_acc_1[j], _slope), _acc_1[j]);
        }
    }
    else if (activation_type == 3)
    {
        // Clip to [min, max]
        float32x4_t _min = vdupq_n_f32(activation_params[0]);
        float32x4_t _max = vdupq_n_f32(activation_params[1]);
        for (int j = 0; j < 2; j++)
        {
            _acc_0[j] = vminq_f32(vmaxq_f32(_acc_0[j], _min), _max);
            _acc_1[j] = vminq_f32(vmaxq_f32(_acc_1[j], _min), _max);
        }
    }
    else if (activation_type == 4)
    {
        // Sigmoid: 1 / (1 + exp(-x)) via vrecpe + one refinement step
        float32x4_t _one = vdupq_n_f32(1.f);
        for (int j = 0; j < 2; j++)
        {
            float32x4_t _den0 = vaddq_f32(exp_ps(vnegq_f32(_acc_0[j])), _one);
            float32x4_t _den1 = vaddq_f32(exp_ps(vnegq_f32(_acc_1[j])), _one);
            float32x4_t _rcp0 = vrecpeq_f32(_den0);
            float32x4_t _rcp1 = vrecpeq_f32(_den1);
            _acc_0[j] = vmulq_f32(vrecpsq_f32(_den0, _rcp0), _rcp0);
            _acc_1[j] = vmulq_f32(vrecpsq_f32(_den1, _rcp1), _rcp1);
        }
    }

    for (int j = 0; j < 2; j++)
    {
        vst1q_f32(outptr0 + j * 4, _acc_0[j]);
        vst1q_f32(outptr1 + j * 4, _acc_1[j]);
    }
    outptr0 += 8;
    outptr1 += 8;
}
// Process the remaining single pixels for output channels p and p+1.
for (; i<size; i++)
{
    const float* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2 + i%2);

    float32x4_t _acc0 = _bias0; // output channel p
    float32x4_t _acc1 = _bias1; // output channel p+1

    const float* kptr0 = (const float*)kernel + p * inch * 16;
    const float* kptr1 = (const float*)kernel + (p+1) * inch * 16;

    for (int q = 0; q < inch; q++)
    {
        float32x4_t _val = vld1q_f32( tmpptr );

        _acc0 = vmlaq_laneq_f32(_acc0, vld1q_f32(kptr0), _val, 0);
        _acc0 = vmlaq_laneq_f32(_acc0, vld1q_f32(kptr0 + 4), _val, 1);
        _acc0 = vmlaq_laneq_f32(_acc0, vld1q_f32(kptr0 + 8), _val, 2);
        _acc0 = vmlaq_laneq_f32(_acc0, vld1q_f32(kptr0 + 12), _val, 3);
        _acc1 = vmlaq_laneq_f32(_acc1, vld1q_f32(kptr1), _val, 0);
        _acc1 = vmlaq_laneq_f32(_acc1, vld1q_f32(kptr1 + 4), _val, 1);
        _acc1 = vmlaq_laneq_f32(_acc1, vld1q_f32(kptr1 + 8), _val, 2);
        _acc1 = vmlaq_laneq_f32(_acc1, vld1q_f32(kptr1 + 12), _val, 3);

        tmpptr += 4;
        kptr0 += 16;
        kptr1 += 16;
    }

    if (activation_type == 1)
    {
        // ReLU
        float32x4_t _zero = vdupq_n_f32(0.f);
        _acc0 = vmaxq_f32(_acc0, _zero);
        _acc1 = vmaxq_f32(_acc1, _zero);
    }
    else if (activation_type == 2)
    {
        // Leaky ReLU: x <= 0 ? x * slope : x
        float32x4_t _zero = vdupq_n_f32(0.f);
        float32x4_t _slope = vdupq_n_f32(activation_params[0]);
        _acc0 = vbslq_f32(vcleq_f32(_acc0, _zero), vmulq_f32(_acc0, _slope), _acc0);
        _acc1 = vbslq_f32(vcleq_f32(_acc1, _zero), vmulq_f32(_acc1, _slope), _acc1);
    }
    else if (activation_type == 3)
    {
        // Clip to [min, max]
        float32x4_t _min = vdupq_n_f32(activation_params[0]);
        float32x4_t _max = vdupq_n_f32(activation_params[1]);
        _acc0 = vminq_f32(vmaxq_f32(_acc0, _min), _max);
        _acc1 = vminq_f32(vmaxq_f32(_acc1, _min), _max);
    }
    else if (activation_type == 4)
    {
        // Sigmoid: 1 / (1 + exp(-x)) via vrecpe + one refinement step
        float32x4_t _one = vdupq_n_f32(1.f);
        float32x4_t _den0 = vaddq_f32(exp_ps(vnegq_f32(_acc0)), _one);
        float32x4_t _den1 = vaddq_f32(exp_ps(vnegq_f32(_acc1)), _one);
        float32x4_t _rcp0 = vrecpeq_f32(_den0);
        float32x4_t _rcp1 = vrecpeq_f32(_den1);
        _acc0 = vmulq_f32(vrecpsq_f32(_den0, _rcp0), _rcp0);
        _acc1 = vmulq_f32(vrecpsq_f32(_den1, _rcp1), _rcp1);
    }

    vst1q_f32(outptr0, _acc0);
    vst1q_f32(outptr1, _acc1);
    outptr0 += 4;
    outptr1 += 4;
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32(bias + p * 4) : vdupq_n_f32(0.f);
float* outptr = out;
int i=0;
for (; i+7<size; i+=8)
{
const float* tmpptr = tmp.channel(i/8);
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _sum2 = _bias0;
float32x4_t _sum3 = _bias0;
float32x4_t _sum4 = _bias0;
float32x4_t _sum5 = _bias0;
float32x4_t _sum6 = _bias0;
float32x4_t _sum7 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 16;
for (int q=0; q<inch; q++)
{
// asm volatile("nop\nnop\nnop\n" : : :);
// const float* r0 = bottom_blob.channel(q);
// float32x4_t _r0 = vld1q_f32(r0 + i*4);
// float32x4_t _r1 = vld1q_f32(r0 + (i+1)*4);
// float32x4_t _r2 = vld1q_f32(r0 + (i+2)*4);
// float32x4_t _r3 = vld1q_f32(r0 + (i+3)*4);
float32x4_t _r0 = vld1q_f32( tmpptr );
float32x4_t _r1 = vld1q_f32( tmpptr + 4 );
float32x4_t _r2 = vld1q_f32( tmpptr + 8 );
float32x4_t _r3 = vld1q_f32( tmpptr + 12 );
float32x4_t _r4 = vld1q_f32( tmpptr + 16 );
float32x4_t _r5 = vld1q_f32( tmpptr + 20 );
float32x4_t _r6 = vld1q_f32( tmpptr + 24 );
float32x4_t _r7 = vld1q_f32( tmpptr + 28 );
float32x4_t _w0 = vld1q_f32( kptr );
float32x4_t _w1 = vld1q_f32( kptr + 4 );
float32x4_t _w2 = vld1q_f32( kptr + 8 );
float32x4_t _w3 = vld1q_f32( kptr + 12 );
#if __aarch64__
_sum0 = vmlaq_laneq_f32(_sum0, _w0, _r0, 0);
_sum0 = vmlaq_laneq_f32(_sum0, _w1, _r0, 1);
_sum0 = vmlaq_laneq_f32(_sum0, _w2, _r0, 2);
_sum0 = vmlaq_laneq_f32(_sum0, _w3, _r0, 3);
_sum1 = vmlaq_laneq_f32(_sum1, _w0, _r1, 0);
_sum1 = vmlaq_laneq_f32(_sum1, _w1, _r1, 1);
_sum1 = vmlaq_laneq_f32(_sum1, _w2, _r1, 2);
_sum1 = vmlaq_laneq_f32(_sum1, _w3, _r1, 3);
_sum2 = vmlaq_laneq_f32(_sum2, _w0, _r2, 0);
_sum2 = vmlaq_laneq_f32(_sum2, _w1, _r2, 1);
_sum2 = vmlaq_laneq_f32(_sum2, _w2, _r2, 2);
_sum2 = vmlaq_laneq_f32(_sum2, _w3, _r2, 3);
_sum3 = vmlaq_laneq_f32(_sum3, _w0, _r3, 0);
_sum3 = vmlaq_laneq_f32(_sum3, _w1, _r3, 1);
_sum3 = vmlaq_laneq_f32(_sum3, _w2, _r3, 2);
_sum3 = vmlaq_laneq_f32(_sum3, _w3, _r3, 3);
_sum4 = vmlaq_laneq_f32(_sum4, _w0, _r4, 0);
_sum4 = vmlaq_laneq_f32(_sum4, _w1, _r4, 1);
_sum4 = vmlaq_laneq_f32(_sum4, _w2, _r4, 2);
_sum4 = vmlaq_laneq_f32(_sum4, _w3, _r4, 3);
_sum5 = vmlaq_laneq_f32(_sum5, _w0, _r5, 0);
_sum5 = vmlaq_laneq_f32(_sum5, _w1, _r5, 1);
_sum5 = vmlaq_laneq_f32(_sum5, _w2, _r5, 2);
_sum5 = vmlaq_laneq_f32(_sum5, _w3, _r5, 3);
_sum6 = vmlaq_laneq_f32(_sum6, _w0, _r6, 0);
_sum6 = vmlaq_laneq_f32(_sum6, _w1, _r6, 1);
_sum6 = vmlaq_laneq_f32(_sum6, _w2, _r6, 2);
_sum6 = vmlaq_laneq_f32(_sum6, _w3, _r6, 3);
_sum7 = vmlaq_laneq_f32(_sum7, _w0, _r7, 0);
_sum7 = vmlaq_laneq_f32(_sum7, _w1, _r7, 1);
_sum7 = vmlaq_laneq_f32(_sum7, _w2, _r7, 2);
_sum7 = vmlaq_laneq_f32(_sum7, _w3, _r7, 3);
#else
_sum0 = vmlaq_lane_f32(_sum0, _w0, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _w1, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _w2, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _w3, vget_high_f32(_r0), 1);
_sum1 = vmlaq_lane_f32(_sum1, _w0, vget_low_f32(_r1), 0);
_sum1 = vmlaq_lane_f32(_sum1, _w1, vget_low_f32(_r1), 1);
_sum1 = vmlaq_lane_f32(_sum1, _w2, vget_high_f32(_r1), 0);
_sum1 = vmlaq_lane_f32(_sum1, _w3, vget_high_f32(_r1), 1);
_sum2 = vmlaq_lane_f32(_sum2, _w0, vget_low_f32(_r2), 0);
_sum2 = vmlaq_lane_f32(_sum2, _w1, vget_low_f32(_r2), 1);
_sum2 = vmlaq_lane_f32(_sum2, _w2, vget_high_f32(_r2), 0);
_sum2 = vmlaq_lane_f32(_sum2, _w3, vget_high_f32(_r2), 1);
_sum3 = vmlaq_lane_f32(_sum3, _w0, vget_low_f32(_r3), 0);
_sum3 = vmlaq_lane_f32(_sum3, _w1, vget_low_f32(_r3), 1);
_sum3 = vmlaq_lane_f32(_sum3, _w2, vget_high_f32(_r3), 0);
_sum3 = vmlaq_lane_f32(_sum3, _w3, vget_high_f32(_r3), 1);
_sum4 = vmlaq_lane_f32(_sum4, _w0, vget_low_f32(_r4), 0);
_sum4 = vmlaq_lane_f32(_sum4, _w1, vget_low_f32(_r4), 1);
_sum4 = vmlaq_lane_f32(_sum4, _w2, vget_high_f32(_r4), 0);
_sum4 = vmlaq_lane_f32(_sum4, _w3, vget_high_f32(_r4), 1);
_sum5 = vmlaq_lane_f32(_sum5, _w0, vget_low_f32(_r5), 0);
_sum5 = vmlaq_lane_f32(_sum5, _w1, vget_low_f32(_r5), 1);
_sum5 = vmlaq_lane_f32(_sum5, _w2, vget_high_f32(_r5), 0);
_sum5 = vmlaq_lane_f32(_sum5, _w3, vget_high_f32(_r5), 1);
_sum6 = vmlaq_lane_f32(_sum6, _w0, vget_low_f32(_r6), 0);
_sum6 = vmlaq_lane_f32(_sum6, _w1, vget_low_f32(_r6), 1);
_sum6 = vmlaq_lane_f32(_sum6, _w2, vget_high_f32(_r6), 0);
_sum6 = vmlaq_lane_f32(_sum6, _w3, vget_high_f32(_r6), 1);
_sum7 = vmlaq_lane_f32(_sum7, _w0, vget_low_f32(_r7), 0);
_sum7 = vmlaq_lane_f32(_sum7, _w1, vget_low_f32(_r7), 1);
_sum7 = vmlaq_lane_f32(_sum7, _w2, vget_high_f32(_r7), 0);
_sum7 = vmlaq_lane_f32(_sum7, _w3, vget_high_f32(_r7), 1);
#endif
tmpptr += 32;
kptr += 16;
}
if (activation_type == 1)
{
float32x4_t _zero = vdupq_n_f32(0.f);
_sum0 = vmaxq_f32(_sum0, _zero);
_sum1 = vmaxq_f32(_sum1, _zero);
_sum2 = vmaxq_f32(_sum2, _zero);
_sum3 = vmaxq_f32(_sum3, _zero);
_sum4 = vmaxq_f32(_sum4, _zero);
_sum5 = vmaxq_f32(_sum5, _zero);
_sum6 = vmaxq_f32(_sum6, _zero);
_sum7 = vmaxq_f32(_sum7, _zero);
}
else if (activation_type == 2)
{
float32x4_t _zero = vdupq_n_f32(0.f);
float32x4_t _slope = vdupq_n_f32(activation_params[0]);
_sum0 = vbslq_f32(vcleq_f32(_sum0, _zero), vmulq_f32(_sum0, _slope), _sum0);
_sum1 = vbslq_f32(vcleq_f32(_sum1, _zero), vmulq_f32(_sum1, _slope), _sum1);
_sum2 = vbslq_f32(vcleq_f32(_sum2, _zero), vmulq_f32(_sum2, _slope), _sum2);
_sum3 = vbslq_f32(vcleq_f32(_sum3, _zero), vmulq_f32(_sum3, _slope), _sum3);
_sum4 = vbslq_f32(vcleq_f32(_sum4, _zero), vmulq_f32(_sum4, _slope), _sum4);
_sum5 = vbslq_f32(vcleq_f32(_sum5, _zero), vmulq_f32(_sum5, _slope), _sum5);
_sum6 = vbslq_f32(vcleq_f32(_sum6, _zero), vmulq_f32(_sum6, _slope), _sum6);
_sum7 = vbslq_f32(vcleq_f32(_sum7, _zero), vmulq_f32(_sum7, _slope), _sum7);
}
else if (activation_type == 3)
{
float32x4_t _min = vdupq_n_f32(activation_params[0]);
float32x4_t _max = vdupq_n_f32(activation_params[1]);
_sum0 = vmaxq_f32(_sum0, _min);
_sum0 = vminq_f32(_sum0, _max);
_sum1 = vmaxq_f32(_sum1, _min);
_sum1 = vminq_f32(_sum1, _max);
_sum2 = vmaxq_f32(_sum2, _min);
_sum2 = vminq_f32(_sum2, _max);
_sum3 = vmaxq_f32(_sum3, _min);
_sum3 = vminq_f32(_sum3, _max);
_sum4 = vmaxq_f32(_sum4, _min);
_sum4 = vminq_f32(_sum4, _max);
_sum5 = vmaxq_f32(_sum5, _min);
_sum5 = vminq_f32(_sum5, _max);
_sum6 = vmaxq_f32(_sum6, _min);
_sum6 = vminq_f32(_sum6, _max);
_sum7 = vmaxq_f32(_sum7, _min);
_sum7 = vminq_f32(_sum7, _max);
}
else if (activation_type == 4)
{
float32x4_t _one = vdupq_n_f32(1.f);
_sum0 = vnegq_f32(_sum0);
_sum1 = vnegq_f32(_sum1);
_sum2 = vnegq_f32(_sum2);
_sum3 = vnegq_f32(_sum3);
_sum4 = vnegq_f32(_sum4);
_sum5 = vnegq_f32(_sum5);
_sum6 = vnegq_f32(_sum6);
_sum7 = vnegq_f32(_sum7);
_sum0 = exp_ps(_sum0);
_sum1 = exp_ps(_sum1);
_sum2 = exp_ps(_sum2);
_sum3 = exp_ps(_sum3);
_sum4 = exp_ps(_sum4);
_sum5 = exp_ps(_sum5);
_sum6 = exp_ps(_sum6);
_sum7 = exp_ps(_sum7);
_sum0 = vaddq_f32(_sum0, _one);
_sum1 = vaddq_f32(_sum1, _one);
_sum2 = vaddq_f32(_sum2, _one);
_sum3 = vaddq_f32(_sum3, _one);
_sum4 = vaddq_f32(_sum4, _one);
_sum5 = vaddq_f32(_sum5, _one);
_sum6 = vaddq_f32(_sum6, _one);
_sum7 = vaddq_f32(_sum7, _one);
float32x4_t _outp0 = vrecpeq_f32(_sum0);
float32x4_t _outp1 = vrecpeq_f32(_sum1);
float32x4_t _outp2 = vrecpeq_f32(_sum2);
float32x4_t _outp3 = vrecpeq_f32(_sum3);
float32x4_t _outp4 = vrecpeq_f32(_sum4);
float32x4_t _outp5 = vrecpeq_f32(_sum5);
float32x4_t _outp6 = vrecpeq_f32(_sum6);
float32x4_t _outp7 = vrecpeq_f32(_sum7);
_outp0 = vmulq_f32(vrecpsq_f32(_sum0, _outp0), _outp0);
_outp1 = vmulq_f32(vrecpsq_f32(_sum1, _outp1), _outp1);
_outp2 = vmulq_f32(vrecpsq_f32(_sum2, _outp0), _outp2);
_outp3 = vmulq_f32(vrecpsq_f32(_sum3, _outp1), _outp3);
_outp4 = vmulq_f32(vrecpsq_f32(_sum4, _outp4), _outp4);
_outp5 = vmulq_f32(vrecpsq_f32(_sum5, _outp5), _outp5);
_outp6 = vmulq_f32(vrecpsq_f32(_sum6, _outp6), _outp6);
_outp7 = vmulq_f32(vrecpsq_f32(_sum7, _outp7), _outp7);
// _outp0 = vmulq_f32(vrecpsq_f32(_sum0, _outp0), _outp0);
// _outp1 = vmulq_f32(vrecpsq_f32(_sum1, _outp1), _outp1);
// _outp2 = vmulq_f32(vrecpsq_f32(_sum2, _outp0), _outp2);
// _outp3 = vmulq_f32(vrecpsq_f32(_sum3, _outp1), _outp3);
// _outp4 = vmulq_f32(vrecpsq_f32(_sum4, _outp4), _outp4);
// _outp5 = vmulq_f32(vrecpsq_f32(_sum5, _outp5), _outp5);
// _outp6 = vmulq_f32(vrecpsq_f32(_sum6, _outp6), _outp6);
// _outp7 = vmulq_f32(vrecpsq_f32(_sum7, _outp7), _outp7);
_sum0 = _outp0;
_sum1 = _outp1;
_sum2 = _outp2;
_sum3 = _outp3;
_sum4 = _outp4;
_sum5 = _outp5;
_sum6 = _outp6;
_sum7 = _outp7;
}
vst1q_f32(outptr, _sum0);
vst1q_f32(outptr + 4, _sum1);
vst1q_f32(outptr + 8, _sum2);
vst1q_f32(outptr + 12, _sum3);
vst1q_f32(outptr + 16, _sum4);
vst1q_f32(outptr + 20, _sum5);
vst1q_f32(outptr + 24, _sum6);
vst1q_f32(outptr + 28, _sum7);
outptr += 32;
}
for (; i+3<size; i+=4)
{
const float* tmpptr = tmp.channel(i/8+(i%8)/4);
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _sum2 = _bias0;
float32x4_t _sum3 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 16;
for (int q=0; q<inch; q++)
{
// const float* r0 = bottom_blob.channel(q);
// float32x4_t _r0 = vld1q_f32(r0 + i*4);
// float32x4_t _r1 = vld1q_f32(r0 + (i+1)*4);
// float32x4_t _r2 = vld1q_f32(r0 + (i+2)*4);
// float32x4_t _r3 = vld1q_f32(r0 + (i+3)*4);
float32x4_t _r0 = vld1q_f32( tmpptr );
float32x4_t _r1 = vld1q_f32( tmpptr + 4 );
float32x4_t _r2 = vld1q_f32( tmpptr + 8 );
float32x4_t _r3 = vld1q_f32( tmpptr + 12 );
float32x4_t _w0 = vld1q_f32( kptr );
float32x4_t _w1 = vld1q_f32( kptr + 4 );
float32x4_t _w2 = vld1q_f32( kptr + 8 );
float32x4_t _w3 = vld1q_f32( kptr + 12 );
#if __aarch64__
_sum0 = vmlaq_laneq_f32(_sum0, _w0, _r0, 0);
_sum0 = vmlaq_laneq_f32(_sum0, _w1, _r0, 1);
_sum0 = vmlaq_laneq_f32(_sum0, _w2, _r0, 2);
_sum0 = vmlaq_laneq_f32(_sum0, _w3, _r0, 3);
_sum1 = vmlaq_laneq_f32(_sum1, _w0, _r1, 0);
_sum1 = vmlaq_laneq_f32(_sum1, _w1, _r1, 1);
_sum1 = vmlaq_laneq_f32(_sum1, _w2, _r1, 2);
_sum1 = vmlaq_laneq_f32(_sum1, _w3, _r1, 3);
_sum2 = vmlaq_laneq_f32(_sum2, _w0, _r2, 0);
_sum2 = vmlaq_laneq_f32(_sum2, _w1, _r2, 1);
_sum2 = vmlaq_laneq_f32(_sum2, _w2, _r2, 2);
_sum2 = vmlaq_laneq_f32(_sum2, _w3, _r2, 3);
_sum3 = vmlaq_laneq_f32(_sum3, _w0, _r3, 0);
_sum3 = vmlaq_laneq_f32(_sum3, _w1, _r3, 1);
_sum3 = vmlaq_laneq_f32(_sum3, _w2, _r3, 2);
_sum3 = vmlaq_laneq_f32(_sum3, _w3, _r3, 3);
#else
_sum0 = vmlaq_lane_f32(_sum0, _w0, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _w1, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _w2, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _w3, vget_high_f32(_r0), 1);
_sum1 = vmlaq_lane_f32(_sum1, _w0, vget_low_f32(_r1), 0);
_sum1 = vmlaq_lane_f32(_sum1, _w1, vget_low_f32(_r1), 1);
_sum1 = vmlaq_lane_f32(_sum1, _w2, vget_high_f32(_r1), 0);
_sum1 = vmlaq_lane_f32(_sum1, _w3, vget_high_f32(_r1), 1);
_sum2 = vmlaq_lane_f32(_sum2, _w0, vget_low_f32(_r2), 0);
_sum2 = vmlaq_lane_f32(_sum2, _w1, vget_low_f32(_r2), 1);
_sum2 = vmlaq_lane_f32(_sum2, _w2, vget_high_f32(_r2), 0);
_sum2 = vmlaq_lane_f32(_sum2, _w3, vget_high_f32(_r2), 1);
_sum3 = vmlaq_lane_f32(_sum3, _w0, vget_low_f32(_r3), 0);
_sum3 = vmlaq_lane_f32(_sum3, _w1, vget_low_f32(_r3), 1);
_sum3 = vmlaq_lane_f32(_sum3, _w2, vget_high_f32(_r3), 0);
_sum3 = vmlaq_lane_f32(_sum3, _w3, vget_high_f32(_r3), 1);
#endif
tmpptr += 16;
kptr += 16;
}
if (activation_type == 1)
{
float32x4_t _zero = vdupq_n_f32(0.f);
_sum0 = vmaxq_f32(_sum0, _zero);
_sum1 = vmaxq_f32(_sum1, _zero);
_sum2 = vmaxq_f32(_sum2, _zero);
_sum3 = vmaxq_f32(_sum3, _zero);
}
else if (activation_type == 2)
{
float32x4_t _zero = vdupq_n_f32(0.f);
float32x4_t _slope = vdupq_n_f32(activation_params[0]);
_sum0 = vbslq_f32(vcleq_f32(_sum0, _zero), vmulq_f32(_sum0, _slope), _sum0);
_sum1 = vbslq_f32(vcleq_f32(_sum1, _zero), vmulq_f32(_sum1, _slope), _sum1);
_sum2 = vbslq_f32(vcleq_f32(_sum2, _zero), vmulq_f32(_sum2, _slope), _sum2);
_sum3 = vbslq_f32(vcleq_f32(_sum3, _zero), vmulq_f32(_sum3, _slope), _sum3);
}
else if (activation_type == 3)
{
float32x4_t _min = vdupq_n_f32(activation_params[0]);
float32x4_t _max = vdupq_n_f32(activation_params[1]);
_sum0 = vmaxq_f32(_sum0, _min);
_sum0 = vminq_f32(_sum0, _max);
_sum1 = vmaxq_f32(_sum1, _min);
_sum1 = vminq_f32(_sum1, _max);
_sum2 = vmaxq_f32(_sum2, _min);
_sum2 = vminq_f32(_sum2, _max);
_sum3 = vmaxq_f32(_sum3, _min);
_sum3 = vminq_f32(_sum3, _max);
}
else if (activation_type == 4)
{
float32x4_t _one = vdupq_n_f32(1.f);
_sum0 = vnegq_f32(_sum0);
_sum1 = vnegq_f32(_sum1);
_sum2 = vnegq_f32(_sum2);
_sum3 = vnegq_f32(_sum3);
_sum0 = exp_ps(_sum0);
_sum1 = exp_ps(_sum1);
_sum2 = exp_ps(_sum2);
_sum3 = exp_ps(_sum3);
_sum0 = vaddq_f32(_sum0, _one);
_sum1 = vaddq_f32(_sum1, _one);
_sum2 = vaddq_f32(_sum2, _one);
_sum3 = vaddq_f32(_sum3, _one);
float32x4_t _outp0 = vrecpeq_f32(_sum0);
float32x4_t _outp1 = vrecpeq_f32(_sum1);
float32x4_t _outp2 = vrecpeq_f32(_sum2);
float32x4_t _outp3 = vrecpeq_f32(_sum3);
_outp0 = vmulq_f32(vrecpsq_f32(_sum0, _outp0), _outp0);
_outp1 = vmulq_f32(vrecpsq_f32(_sum1, _outp1), _outp1);
_outp2 = vmulq_f32(vrecpsq_f32(_sum2, _outp0), _outp2);
_outp3 = vmulq_f32(vrecpsq_f32(_sum3, _outp1), _outp3);
// _outp0 = vmulq_f32(vrecpsq_f32(_sum0, _outp0), _outp0);
// _outp1 = vmulq_f32(vrecpsq_f32(_sum1, _outp1), _outp1);
// _outp2 = vmulq_f32(vrecpsq_f32(_sum2, _outp0), _outp2);
// _outp3 = vmulq_f32(vrecpsq_f32(_sum3, _outp1), _outp3);
_sum0 = _outp0;
_sum1 = _outp1;
_sum2 = _outp2;
_sum3 = _outp3;
}
vst1q_f32(outptr, _sum0);
vst1q_f32(outptr + 4, _sum1);
vst1q_f32(outptr + 8, _sum2);
vst1q_f32(outptr + 12, _sum3);
outptr += 16;
}
for (; i+1<size; i+=2)
{
const float* tmpptr = tmp.channel(i/8+(i%8)/4 + (i%4)/2);
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 16;
for (int q=0; q<inch; q++)
{
// const float* r0 = bottom_blob.channel(q);
// float32x4_t _r0 = vld1q_f32(r0 + i*4);
// float32x4_t _r1 = vld1q_f32(r0 + (i+1)*4);
float32x4_t _r0 = vld1q_f32( tmpptr );
float32x4_t _r1 = vld1q_f32( tmpptr + 4 );
float32x4_t _w0 = vld1q_f32( kptr );
float32x4_t _w1 = vld1q_f32( kptr + 4 );
float32x4_t _w2 = vld1q_f32( kptr + 8 );
float32x4_t _w3 = vld1q_f32( kptr + 12 );
#if __aarch64__
_sum0 = vmlaq_laneq_f32(_sum0, _w0, _r0, 0);
_sum0 = vmlaq_laneq_f32(_sum0, _w1, _r0, 1);
_sum0 = vmlaq_laneq_f32(_sum0, _w2, _r0, 2);
_sum0 = vmlaq_laneq_f32(_sum0, _w3, _r0, 3);
_sum1 = vmlaq_laneq_f32(_sum1, _w0, _r1, 0);
_sum1 = vmlaq_laneq_f32(_sum1, _w1, _r1, 1);
_sum1 = vmlaq_laneq_f32(_sum1, _w2, _r1, 2);
_sum1 = vmlaq_laneq_f32(_sum1, _w3, _r1, 3);
#else
_sum0 = vmlaq_lane_f32(_sum0, _w0, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _w1, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _w2, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _w3, vget_high_f32(_r0), 1);
_sum1 = vmlaq_lane_f32(_sum1, _w0, vget_low_f32(_r1), 0);
_sum1 = vmlaq_lane_f32(_sum1, _w1, vget_low_f32(_r1), 1);
_sum1 = vmlaq_lane_f32(_sum1, _w2, vget_high_f32(_r1), 0);
_sum1 = vmlaq_lane_f32(_sum1, _w3, vget_high_f32(_r1), 1);
#endif
tmpptr += 8;
kptr += 16;
}
if (activation_type == 1)
{
float32x4_t _zero = vdupq_n_f32(0.f);
_sum0 = vmaxq_f32(_sum0, _zero);
_sum1 = vmaxq_f32(_sum1, _zero);
}
else if (activation_type == 2)
{
float32x4_t _zero = vdupq_n_f32(0.f);
float32x4_t _slope = vdupq_n_f32(activation_params[0]);
_sum0 = vbslq_f32(vcleq_f32(_sum0, _zero), vmulq_f32(_sum0, _slope), _sum0);
_sum1 = vbslq_f32(vcleq_f32(_sum1, _zero), vmulq_f32(_sum1, _slope), _sum1);
}
else if (activation_type == 3)
{
float32x4_t _min = vdupq_n_f32(activation_params[0]);
float32x4_t _max = vdupq_n_f32(activation_params[1]);
_sum0 = vmaxq_f32(_sum0, _min);
_sum0 = vminq_f32(_sum0, _max);
_sum1 = vmaxq_f32(_sum1, _min);
_sum1 = vminq_f32(_sum1, _max);
}
else if (activation_type == 4)
{
float32x4_t _one = vdupq_n_f32(1.f);
_sum0 = vnegq_f32(_sum0);
_sum1 = vnegq_f32(_sum1);
_sum0 = exp_ps(_sum0);
_sum1 = exp_ps(_sum1);
_sum0 = vaddq_f32(_sum0, _one);
_sum1 = vaddq_f32(_sum1, _one);
float32x4_t _outp0 = vrecpeq_f32(_sum0);
float32x4_t _outp1 = vrecpeq_f32(_sum1);
_outp0 = vmulq_f32(vrecpsq_f32(_sum0, _outp0), _outp0);
_outp1 = vmulq_f32(vrecpsq_f32(_sum1, _outp1), _outp1);
// _outp0 = vmulq_f32(vrecpsq_f32(_sum0, _outp0), _outp0);
// _outp1 = vmulq_f32(vrecpsq_f32(_sum1, _outp1), _outp1);
_sum0 = _outp0;
_sum1 = _outp1;
}
vst1q_f32(outptr, _sum0);
vst1q_f32(outptr + 4, _sum1);
outptr += 8;
}
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i/8+(i%8)/4 + (i%4)/2 + i%2);
float32x4_t _sum = _bias0;
const float* kptr = (const float*)kernel + p * inch * 16;
for (int q=0; q<inch; q++)
{
// const float* r0 = bottom_blob.channel(q);
// float32x4_t _val = vld1q_f32(r0 + i*4);
float32x4_t _val = vld1q_f32( tmpptr );
float32x4_t _w0 = vld1q_f32( kptr );
float32x4_t _w1 = vld1q_f32( kptr + 4 );
float32x4_t _w2 = vld1q_f32( kptr + 8 );
float32x4_t _w3 = vld1q_f32( kptr + 12 );
#if __aarch64__
_sum = vmlaq_laneq_f32(_sum, _w0, _val, 0);
_sum = vmlaq_laneq_f32(_sum, _w1, _val, 1);
_sum = vmlaq_laneq_f32(_sum, _w2, _val, 2);
_sum = vmlaq_laneq_f32(_sum, _w3, _val, 3);
#else
_sum = vmlaq_lane_f32(_sum, _w0, vget_low_f32(_val), 0);
_sum = vmlaq_lane_f32(_sum, _w1, vget_low_f32(_val), 1);
_sum = vmlaq_lane_f32(_sum, _w2, vget_high_f32(_val), 0);
_sum = vmlaq_lane_f32(_sum, _w3, vget_high_f32(_val), 1);
#endif
tmpptr += 4;
kptr += 16;
}
if (activation_type == 1)
{
float32x4_t _zero = vdupq_n_f32(0.f);
_sum = vmaxq_f32(_sum, _zero);
}
else if (activation_type == 2)
{
float32x4_t _zero = vdupq_n_f32(0.f);
float32x4_t _slope = vdupq_n_f32(activation_params[0]);
_sum = vbslq_f32(vcleq_f32(_sum, _zero), vmulq_f32(_sum, _slope), _sum);
}
else if (activation_type == 3)
{
float32x4_t _min = vdupq_n_f32(activation_params[0]);
float32x4_t _max = vdupq_n_f32(activation_params[1]);
_sum = vmaxq_f32(_sum, _min);
_sum = vminq_f32(_sum, _max);
}
else if (activation_type == 4)
{
float32x4_t _one = vdupq_n_f32(1.f);
_sum = vnegq_f32(_sum);
_sum = exp_ps(_sum);
_sum = vaddq_f32(_sum, _one);
float32x4_t _outp = vrecpeq_f32(_sum);
_outp = vmulq_f32(vrecpsq_f32(_sum, _outp), _outp);
// _outp = vmulq_f32(vrecpsq_f32(_sum, _outp), _outp);
_sum = _outp;
}
vst1q_f32(outptr, _sum);
outptr += 4;
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const float bias0 = bias ? bias[p] : 0.f;
//
// float* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// float sum = bias0;
//
// const float* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const float* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
|
greywolfoptimizer.h | #ifndef GREYWOLFOPTIMIZER_H
#define GREYWOLFOPTIMIZER_H
#include "../entities/problem.h"
#include "globalsolver.h"
#include <iostream>
#include <memory>
template <class T>
class GreyWolfOptimizer : public GlobalSolver<T>
{
public:
    /**
     * Grey Wolf Optimizer (GWO): a population-based metaheuristic that
     * steers a pack of candidate solutions ("wolves") toward the three best
     * individuals found so far (alpha, beta, delta).
     *
     * @param numberOfAgents pack size; must be >= 3 (the three leaders)
     * @param prob           problem instance to optimize
     */
    GreyWolfOptimizer(int numberOfAgents, std::shared_ptr<Problem<T>> prob) : GlobalSolver<T>(numberOfAgents, prob)
    {
        // GWO needs at least the alpha, beta and delta leaders.
        if (this->numberOfAgents < 3) {
            std::cerr << "The number of individuals needs to be equal or higher than 3" << std::endl;
            exit(EXIT_FAILURE);
        }
        puts("Grey Wolf Optimizer instantiated");
    }

    /**
     * Runs the GWO search until maxIterations and/or runningTime is reached.
     * The best solution is tracked through updateGlobalBest() and printed at
     * the end.
     */
    void solve()
    {
        if (this->maxIterations == 0 && this->runningTime == 0) {
            std::cerr << "Use \"setMaxIterations(int)\" or \"setRunningTime(double)\" to "
                         "define a stopping criteria!"
                      << std::endl; // FIX: 'endl' was unqualified (no std::)
            exit(EXIT_FAILURE);
        } else
            std::cout << "Grey Wolf Optimizer search procedure" << std::endl;
        utils::startTimeCounter();
        std::cout << this->numberOfAgents << std::endl;

        // Current population: one decision-variable vector per wolf.
        std::vector<std::vector<double>> wolves(this->numberOfAgents);
#pragma omp parallel for
        for (int i = 0; i < this->numberOfAgents; i++)
            this->problem->fillRandomDecisionVariables(wolves[i]);

        // Objective value of each wolf.
        std::vector<double> wolvesFitness(this->numberOfAgents);
#pragma omp parallel for
        for (int i = 0; i < this->numberOfAgents; i++) {
            evaluateWolf(wolves[i], wolvesFitness[i]);
#pragma omp critical
            this->updateGlobalBest(wolves[i], wolvesFitness[i], true);
        }

        std::vector<double> alpha, beta, delta;
        // Leader fitness values; meaningful only once the matching leader
        // vector is non-empty (the empty() checks below guard first use).
        double fitnessAlpha = 0.0, fitnessBeta = 0.0, fitnessDelta = 0.0;
        int iteration = -1;
        while (iteration++ < this->maxIterations || utils::getCurrentTime() < this->runningTime) {
            // Select the pack leaders. NOTE(review): as in the original code,
            // the else-if chain updates at most one leader per wolf.
            for (int i = 0; i < this->numberOfAgents; i++) {
                if (alpha.empty() || wolvesFitness[i] < fitnessAlpha) {
                    alpha = wolves[i];
                    fitnessAlpha = wolvesFitness[i];
                } else if (beta.empty() || wolvesFitness[i] < fitnessBeta) {
                    beta = wolves[i];
                    fitnessBeta = wolvesFitness[i];
                } else if (delta.empty() || wolvesFitness[i] < fitnessDelta) {
                    delta = wolves[i];
                    fitnessDelta = wolvesFitness[i];
                }
            }
            // Coefficient 'a' decreases linearly from 2 to 0 over the run,
            // either by iteration count or by elapsed time.
            double a;
            if (iteration < this->maxIterations)
                a = 2 - iteration * (2. / this->maxIterations);
            else
                a = 2 - utils::getCurrentTime() * (2. / this->runningTime);
#pragma omp parallel for
            for (int i = 0; i < this->numberOfAgents; i++) {
                for (size_t j = 0; j < this->problem->getDimension(); j++) {
                    double r1 = utils::getRandom();
                    double r2 = utils::getRandom();
                    double A1 = 2 * a * r1 - a;
                    double C1 = 2 * r2;
                    // FIX: was the C integer abs(), which truncated the
                    // distance to an int.
                    const double dAlpha = std::abs(C1 * alpha[j] - wolves[i][j]);
                    const double X1 = alpha[j] - A1 * dAlpha;
                    r1 = utils::getRandom();
                    r2 = utils::getRandom();
                    double A2 = 2 * a * r1 - a;
                    double C2 = 2 * r2;
                    const double dBeta = std::abs(C2 * beta[j] - wolves[i][j]);
                    const double X2 = beta[j] - A2 * dBeta;
                    r1 = utils::getRandom();
                    r2 = utils::getRandom();
                    double A3 = 2 * a * r1 - a;
                    double C3 = 2 * r2;
                    const double dDelta = std::abs(C3 * delta[j] - wolves[i][j]);
                    const double X3 = delta[j] - A3 * dDelta;
                    // FIX: average all three leaders, (X1 + X2 + X3) / 3; the
                    // original divided only X3 due to operator precedence.
                    wolves[i][j] = std::max(this->problem->getLb()[j],
                                            std::min((X1 + X2 + X3) / 3, this->problem->getUb()[j]));
                }
            }
#pragma omp parallel for
            for (int i = 0; i < this->numberOfAgents; i++) {
                evaluateWolf(wolves[i], wolvesFitness[i]);
#pragma omp critical
                this->updateGlobalBest(wolves[i], wolvesFitness[i], true);
            }
        }
        std::cout << "Best solution " << this->globalBestFitness << " Running time: " << utils::getCurrentTime() << std::endl << "Best solution decision variables: ";
        utils::printVector(this->globalBest);
    }

private:
    // Evaluates one position vector according to the problem's representation
    // (direct objective evaluation, or indirect solution construction).
    // Extracted from the two identical switches in solve().
    void evaluateWolf(std::vector<double>& position, double& fitness)
    {
        switch (this->problem->getRepType()) {
        case RepresentationType::DIRECT:
            fitness = this->problem->evaluate(position);
            break;
        case RepresentationType::INDIRECT: {
            // Braces keep the shared_ptr's initialization case-local, so no
            // case label can jump past it.
            std::shared_ptr<Solution<T>> sol = this->problem->construct(position);
            fitness = sol->getFitness();
            break;
        }
        }
    }
};
#endif // GREYWOLFOPTIMIZER_H
|
GB_binop__lxor_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint8)
// A*D function (colscale): GB (_AxD__lxor_uint8)
// D*A function (rowscale): GB (_DxB__lxor_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint8)
// C=scalar+B GB (_bind1st__lxor_uint8)
// C=scalar+B' GB (_bind1st_tran__lxor_uint8)
// C=A+scalar GB (_bind2nd__lxor_uint8)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT8 || GxB_NO_LXOR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Generated kernel: C = A+B with all three matrices dense; the loop lives in
// the shared template, specialized by the LXOR/uint8 macros defined above.
void GB (_Cdense_ewise3_noaccum__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: C += B, accumulating a sparse B into a dense C via the
// subassign method 23 template (B pre-sliced into B_ntasks tasks).
GrB_Info GB (_Cdense_accumB__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: C += b, accumulating a scalar into a dense C.
GrB_Info GB (_Cdense_accumb__lxor_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the inner block already returned (generated boilerplate)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: C = A*D, scaling each column of A by the diagonal of D.
GrB_Info GB (_AxD__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written by the template; C's pattern is already that of A.
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: C = D*B, scaling each row of B by the diagonal of D.
GrB_Info GB (_DxB__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written by the template; C's pattern is already that of B.
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Generated kernel: eWiseAdd C=A+B (optionally masked), specialized for the
// LXOR operator on uint8. All numeric work is in GB_add_template.c.
GrB_Info GB (_AaddB__lxor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A and B; released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta defaults are only read by the template for eWiseUnion
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult (method 08) where C is sparse/hypersparse,
// with optional mask M; work is done by GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__lxor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult (method 02), A sparse/hyper and B bitmap/full.
// GB_BINOP_FLIP is 0 for LXOR (commutative), so only the unflipped branch of
// the #if below is compiled in.
GrB_Info GB (_AemultB_02__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult (method 04), M sparse/hyper with A and B both
// bitmap/full; M pre-sliced into M_ntasks tasks.
GrB_Info GB (_AemultB_04__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult where the result C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__lxor_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Generated kernel: Cx [p] = lxor (x, Bx [p]) for all bnz entries, binding
// the scalar x as the first operand.
GrB_Info GB (_bind1st__lxor_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in B's bitmap (Bb)
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
// logical xor of the truth values of x and bij
Cx [p] = ((x != 0) != (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Generated kernel: Cx [p] = lxor (Ax [p], y) for all anz entries, binding
// the scalar y as the second operand.
GrB_Info GB (_bind2nd__lxor_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in A's bitmap (Ab)
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
// logical xor of the truth values of aij and y
Cx [p] = ((aij != 0) != (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
// Generated kernel: C = lxor (x, A'), transposing A while applying the
// operator with scalar x bound first (via GB_CAST_OP defined above).
GrB_Info GB (_bind1st_tran__lxor_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE after the template include (generated boilerplate;
// a no-op here since the type is unchanged)
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// Generated kernel: C = lxor (A', y), transposing A while applying the
// operator with scalar y bound second (via GB_CAST_OP defined above).
GrB_Info GB (_bind2nd_tran__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
toimg.c | /* Copyright 2013-2018 The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013, 2015 Martin Uecker <uecker@eecs.berkeley.edu>
* 2015, 2018 Jon Tamir <jtamir@eecs.berkeley.edu>
*/
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <strings.h>
#include <complex.h>
#include <stdbool.h>
#include <math.h>
#include "num/multind.h"
#include "num/init.h"
#include "num/flpmath.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "misc/mmio.h"
#include "misc/png.h"
#include "misc/dicom.h"
#include "misc/opts.h"
#ifndef DIMS
#define DIMS 16
#endif
#ifndef CFL_SIZE
#define CFL_SIZE sizeof(complex float)
#endif
static const char usage_str[] = "[-h] <input> <output_prefix>";
static const char help_str[] = "Create magnitude images as png or proto-dicom.\n"
"The first two non-singleton dimensions will\n"
"be used for the image, and the other dimensions\n"
"will be looped over.\n";
// from view:src/draw.c
// Restrict x to the closed interval [a, b].
static double clamp(double a, double b, double x)
{
	if (x < a)
		return a;

	if (x > b)
		return b;

	return x;
}
// Map x into [0, 1] relative to the window [a, b] (clamping the result),
// then apply gamma correction with exponent g.
static double windowing(double g, double a, double b, double x)
{
	double t = (x - a) / (b - a);

	if (t < 0.)
		t = 0.;
	else if (t > 1.)
		t = 1.;

	return pow(t, g);
}
// Write a single magnitude image of size h x w to file `name`.
// dicom: 16-bit grayscale dicom, otherwise 8-bit RGB png.
// use_windowing: apply windowing(gamma, contrast, window) to each value.
// scale: normalization divisor for the complex magnitudes.
// data is stored column-major (column j, row i at data[j * h + i]).
static void toimg(bool dicom, bool use_windowing, const char* name, long inum, float gamma, float contrast, float window, float scale, long h, long w, const complex float* data)
{
	int len = strlen(name);
	assert(len >= 1);

	// png: 3 bytes per pixel (RGB); dicom: 2 bytes per pixel (16-bit sample)
	int nr_bytes = dicom ? 2 : 3;
	float max_val = dicom ? 65535. : 255.;

	unsigned char (*buf)[h][w][nr_bytes] = TYPE_ALLOC(unsigned char[h][w][nr_bytes]);

	for (int i = 0; i < h; i++) {
		for (int j = 0; j < w; j++) {

			double val = cabsf(data[j * h + i]) / scale;
			unsigned int value = (unsigned int)(max_val * (use_windowing ? windowing(gamma, contrast, window, val) : val));

			if (!dicom) {
				// grayscale: replicate into R, G, B
				(*buf)[i][j][0] = value;
				(*buf)[i][j][1] = value;
				(*buf)[i][j][2] = value;

			} else {
				// 16-bit sample, low byte first
				(*buf)[i][j][0] = (value >> 0) & 0xFF;
				(*buf)[i][j][1] = (value >> 8) & 0xFF;	// FIX: was [2] — out of bounds for nr_bytes == 2
			}
		}
	}

	(dicom ? dicom_write : png_write_rgb24)(name, w, h, inum, &(*buf)[0][0][0]);

	free(buf);
}
static void toimg_stack(const char* name, bool dicom, bool single_scale, bool use_windowing, float gamma, float contrast, float window, const long dims[DIMS], const complex float* data)
{
long data_size = md_calc_size(DIMS, dims);
long sq_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
int l = 0;
for (int i = 0; i < DIMS; i++)
if (1 != dims[i])
sq_dims[l++] = dims[i];
float max = 0.;
for (long i = 0; i < data_size; i++)
max = MAX(cabsf(data[i]), max);
int len = strlen(name);
assert(len >= 1);
long num_imgs = md_calc_size(DIMS - 2, sq_dims + 2);
long img_size = md_calc_size(2, sq_dims);
debug_printf(DP_INFO, "Writing %d image(s)...", num_imgs);
#pragma omp parallel for
for (long i = 0; i < num_imgs; i++) {
char name_i[len + 10]; // extra space for ".0000.png"
if (num_imgs > 1)
sprintf(name_i, "%s-%04ld.%s", name, i, dicom ? "dcm" : "png");
else
sprintf(name_i, "%s.%s", name, dicom ? "dcm" : "png");
float scale = 0.;
if (use_windowing)
scale = md_znorm(2, sq_dims, data + i * img_size) / md_calc_size(2, sq_dims);
else if (single_scale)
scale = max;
else
for (long j = 0; j < md_calc_size(2, sq_dims); j++)
scale = MAX(cabsf(data[i * img_size + j]), scale);
if (0. == scale)
scale = 1.;
toimg(dicom, use_windowing, name_i, i, gamma, contrast, window, scale, sq_dims[0], sq_dims[1], data + i * img_size);
}
debug_printf(DP_INFO, "done.\n", num_imgs);
}
// Command-line entry point: load a cfl file (argv[1]) and write its
// magnitude images to files named with prefix argv[2] (see usage_str /
// help_str above).  The output format is chosen from the prefix's
// extension (.png or .dcm), or by the deprecated -d flag.
int main_toimg(int argc, char* argv[])
{
// default rendering parameters
float gamma = 1.;
float contrast = 0.;
float window = 750.;
bool use_windowing = false;
bool single_scale = true;
bool dicom = false;
const struct opt_s opts[] = {
OPT_FLOAT('g', &gamma, "gamma", "gamma level"),
OPT_FLOAT('c', &contrast, "contrast", "contrast level"),
OPT_FLOAT('w', &window, "window", "window level"),
OPT_SET('d', &dicom, "write to dicom format (deprecated, use extension .dcm)"),
OPT_CLEAR('m', &single_scale, "re-scale each image"),
OPT_SET('W', &use_windowing, "use dynamic windowing"),
};
cmdline(&argc, argv, 2, 2, usage_str, help_str, ARRAY_SIZE(opts), opts);
num_init();
// pick the output format from the extension and strip it off.
// NOTE(review): rindex() is legacy POSIX (<strings.h>); strrchr() from
// <string.h> is the standard equivalent — consider switching.
char* ext = rindex(argv[2], '.');
if (NULL != ext) {
// an explicit extension conflicts with the deprecated -d flag
assert(!dicom);
if (0 == strcmp(ext, ".dcm"))
dicom = true;
else
if (0 != strcmp(ext, ".png"))
error("Unknown file extension.");
// truncate argv[2] so it becomes the bare output prefix
*ext = '\0';
}
long dims[DIMS];
complex float* data = load_cfl(argv[1], DIMS, dims);
toimg_stack(argv[2], dicom, single_scale, use_windowing, gamma, contrast, window, dims, data);
unmap_cfl(DIMS, dims, data);
return 0;
}
|
GB_binop__isle_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__isle_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__isle_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int32)
// A*D function (colscale): GB (_AxD__isle_int32)
// D*A function (rowscale): GB (_DxB__isle_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int32)
// C=scalar+B GB (_bind1st__isle_int32)
// C=scalar+B' GB (_bind1st_tran__isle_int32)
// C=A+scalar GB (_bind2nd__isle_int32)
// C=A'+scalar GB (_bind2nd_tran__isle_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_INT32 || GxB_NO_ISLE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with cij = (aij <= bij), where C, A, and B are all dense.
// The loop body comes from the included template; this wrapper only binds
// the int32 ISLE operator through the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__isle_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time: caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B_ek_slicing / B_ntasks / B_nthreads describe the precomputed parallel
// partition of B's entries.
GrB_Info GB (_Cdense_accumB__isle_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (type int32_t, passed via p_bwork) into
// the dense matrix C using the ISLE operator; loop supplied by the template.
// Cleanup vs. the generated original: the duplicated, unreachable
// "return (GrB_SUCCESS)" that followed the inner block was removed.
GrB_Info GB (_Cdense_accumb__isle_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j).
// *_is_pattern: only the structure of that input is used, not its values.
GrB_Info GB (_AxD__isle_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i).
GrB_Info GB (_DxB__isle_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd with the ISLE operator applied where both A(i,j) and B(i,j)
// exist.  M is the optional mask (Mask_struct: structural only;
// Mask_comp: complemented).  TaskList / C_ntasks / C_nthreads carry the
// precomputed parallel schedule; the GB_WERK workspaces are freed by
// GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__isle_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with C sparse/hyper, optional mask M
// (Mask_struct: structural only; Mask_comp: complemented).
GrB_Info GB (_AemultB_08__isle_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): iterate over the entries of sparse/hyper A and
// look up B.  For ISLE, GB_BINOP_FLIP is 0, so the flipxy branch below is
// compiled out (the flipped form is handled elsewhere by renaming the op).
GrB_Info GB (_AemultB_02__isle_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): driven by the sparse/hyper mask M; A and B are
// bitmap/full so their entries are looked up directly.
GrB_Info GB (_AemultB_04__isle_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with the result C stored in bitmap form.
GrB_Info GB (_AemultB_bitmap__isle_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x <= Bx [p]) for all p in 0..bnz-1.  Bb is B's bitmap (entries
// with GBB(Bb,p) false are skipped); Cx and Bx may alias.
GrB_Info GB (_bind1st__isle_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] <= y) for all p in 0..anz-1; mirror image of bind1st.
GrB_Info GB (_bind2nd__isle_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply ISLE with the scalar x bound as
// the first (left) operand, via GB_CAST_OP above.
GrB_Info GB (_bind1st_tran__isle_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (generated boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply ISLE with the scalar y bound as
// the second (right) operand, via GB_CAST_OP above.
GrB_Info GB (_bind2nd_tran__isle_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Poisson.h | /*
Copyright (c) 2015, Harsh Bhatia (bhatia4@llnl.gov)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** ---------------------------------------------------------------------------
* Poisson.h
* Library for solving the Poisson equation using integral solution.
* - Technical details can be found in the paper doi: 10.1109/TVCG.2014.2312012
*
* Author: Harsh Bhatia. bhatia4@llnl.gov
*/
#ifndef POISSON_H
#define POISSON_H
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "RGrid.h"
#include "GreensFunction.h"
#include "trapezoidalIntegration.h"
/** functions for solving 2D and 3D Poisson equation on a regular grid
f is called the source function that creates a potential (RHS)
phi is the potential function (LHS)
*/
namespace Poisson {
/// ---------------------------------------------------------------------------
/// ----- serial implementation!
/// I use a map that computes the Green's function only once, and
/// uses it for solving all equations
/// Solve the 2D Poisson equation via the integral solution: for every grid
/// point v0, phi(v0) = integral of G(v, v0) * f(v) over the grid
/// (trapezoidal rule).  Uses the precomputed Green's-function map gFunc
/// so G is evaluated once per pair.  O(sz^2) work overall; the fill loop
/// is OpenMP-parallel.
/// @param f      source field (RHS), one value per grid point (size rgrid.sz)
/// @param rgrid  2D regular grid descriptor
/// @param gFunc  Green's-function lookup
/// @return newly allocated potential of size rgrid.sz — caller must
///         delete[] it — or 0 when the grid is not 2D
template <typename T>
T* solve_Poisson_2D(const T* f, const RGrid &rgrid, const GreenFunc<T> &gFunc){
if(rgrid.dim != 2){
printf(" solve_Poisson_2D requires a 2D regular grid!\n");
return 0;
}
printf(" Solving 2D Poisson Eqn...");
fflush(stdout);
// NOTE(review): clock() measures CPU time, not wall time — with OpenMP the
// reported duration can exceed elapsed time; confirm if wall time was meant.
clock_t tic = clock();
// variables for showing progress
float percent_factor = 100.0f / (float)rgrid.sz;
float percent_unit = 0.1; // show progress at this percent work
uint progress_unit = std::max((uint)1, (uint) (percent_unit / percent_factor));
/// output and temporary memory
T* phi = new T[rgrid.sz];
T* fd = new T[rgrid.sz];
/// -------------------------------------------------------------------
/// compute at v0
for(size_t v0 = 0; v0 < rgrid.sz; v0++){
/// populate the temporary field for the entire grid!
#pragma omp parallel for
for(size_t v = 0; v < rgrid.sz; v++){
fd[v] = gFunc.get_val(v, v0) * f[v];
}
/// integrate
phi[v0] = integ::trapezoidal_2D(fd, rgrid.X, rgrid.Y, rgrid.dx, rgrid.dy);
if(v0 % progress_unit == 0){
printf("\r Solving 2D Poisson Eqn... %.1f %%", (float)v0*percent_factor);
fflush(stdout);
}
}
delete []fd;
double sec = (double)(clock() - tic)/CLOCKS_PER_SEC;
if(sec < 1) printf("\r Solving 2D Poisson Eqn... Done! in %.3f msec.\n", 1000.0*sec);
else printf("\r Solving 2D Poisson Eqn... Done! in %.3f sec.\n", sec);
return phi;
}
/// 3D counterpart of solve_Poisson_2D above: phi(v0) is the trapezoidal
/// integral of rmap(v, v0) * f(v) over the whole grid.  O(sz^2) work; the
/// fill loop is OpenMP-parallel.
/// @param f      source field (size rgrid.sz)
/// @param rgrid  3D regular grid descriptor
/// @param rmap   precomputed Green's-function lookup
/// @return newly allocated potential (caller must delete[]), 0 if not 3D
template <typename T>
T* solve_Poisson_3D(const T* f, const RGrid &rgrid, const GreenFunc<T> &rmap){
if(rgrid.dim != 3){
printf(" solve_Poisson_3D requires a 3D regular grid!\n");
return 0;
}
printf(" Solving 3D Poisson Eqn...");
fflush(stdout);
clock_t tic = clock();
// variables for showing progress
float percent_factor = 100.0f / (float)rgrid.sz;
float percent_unit = 0.1; // show progress at this percent work
uint progress_unit = std::max((uint)1, (uint) (percent_unit / percent_factor));
//printf(" %f %f %d\n", percent_factor, percent_unit, progress_unit);
/// output and temporary memory
T* phi = new T[rgrid.sz];
T* fd = new T[rgrid.sz];
/// -------------------------------------------------------------------
/// compute at v0
for(size_t v0 = 0; v0 < rgrid.sz; v0++){
/// populate the temporary field for the entire grid!
#pragma omp parallel for
for(size_t v = 0; v < rgrid.sz; v++){
fd[v] = rmap.get_val(v, v0) * f[v];
}
/// integrate
phi[v0] = integ::trapezoidal_3D(fd, rgrid.X, rgrid.Y, rgrid.Z, rgrid.dx, rgrid.dy, rgrid.dz);
if(v0 % progress_unit == 0){
printf("\r Solving 3D Poisson Eqn... %.1f %%", (float)v0*percent_factor);
fflush(stdout);
}
}
delete []fd;
double sec = (double)(clock() - tic)/CLOCKS_PER_SEC;
if(sec < 1) printf("\r Solving 3D Poisson Eqn... Done! in %.3f msec.\n", 1000.0*sec);
else printf("\r Solving 3D Poisson Eqn... Done! in %.3f sec.\n", sec);
return phi;
}
/// ---------------------------------------------------------------------------
/// ----- serial implementation!
/// compute the Green's function on the fly
/// too many computations of log and sqrt!
/// Variant of solve_Poisson_2D that evaluates the Green's function on the
/// fly (one log per pair) instead of using a precomputed map — cheaper on
/// memory, heavier on computation.  Returns a newly allocated potential
/// (caller must delete[]), or 0 when the grid is not 2D.
template <typename T>
T* solve_Poisson_2D(const T* f, const RGrid &rgrid){
if(rgrid.dim != 2){
printf(" solve_Poisson_2D requires a 2D regular grid!\n");
return 0;
}
printf(" Solving 2D Poisson Eqn...");
fflush(stdout);
clock_t tic = clock();
// variables for showing progress
float percent_factor = 100.0f / (float)rgrid.sz;
float percent_unit = 0.1; // show progress at this percent work
uint progress_unit = std::max((uint)1, (uint) (percent_unit / percent_factor));
//printf(" %f %f %d\n", percent_factor, percent_unit, progress_unit);
/// -------------------------------------------------------------------
/// the 2D Green's function is ---- 1/(2pi) log( dist(x,x_0) )
/// instead, I use ------------ 1/(4pi) log( dist^2(x,x_0) )
T Greens_factor = 0.25/M_PI;
/// output and temporary memory
T* phi = new T[rgrid.sz];
T* fd = new T[rgrid.sz];
/// -------------------------------------------------------------------
/// compute at v0
for(size_t v0 = 0; v0 < rgrid.sz; v0++){
/// populate the temporary field for the entire grid!
#pragma omp parallel for
for(size_t v = 0; v < rgrid.sz; v++){
// Harsh fixed this on 12/11/2016
// NOTE(review): with the #if 1 branch, dist is 0 at v == v0, so
// log(dist) is -inf there; the disabled #else branch avoided this.
// Confirm the singular sample is intended to enter the integral.
#if 1
T dist = rgrid.get_dist_sq(v, v0);
#else
/// don't want to compute log (0)
T dist = (v == v0) ? (0.01 * (rgrid.dx+rgrid.dy)) :
rgrid.get_dist_sq(v, v0);
#endif
fd[v] = (log(dist) * f[v]);
}
/// integrate
phi[v0] = Greens_factor * integ::trapezoidal_2D(fd, rgrid.X, rgrid.Y, rgrid.dx, rgrid.dy);
if(v0 % progress_unit == 0){
printf("\r Solving 2D Poisson Eqn... %.1f %%", (float)v0*percent_factor);
fflush(stdout);
}
}
delete []fd;
double sec = (double)(clock() - tic)/CLOCKS_PER_SEC;
if(sec < 1) printf("\r Solving 2D Poisson Eqn... Done! in %.3f msec.\n", 1000.0*sec);
else printf("\r Solving 2D Poisson Eqn... Done! in %.3f sec.\n", sec);
return phi;
}
/// Variant of solve_Poisson_3D that evaluates the 3D Green's function
/// (-1/(4 pi dist)) on the fly.  Returns a newly allocated potential
/// (caller must delete[]), or 0 when the grid is not 3D.
template <typename T>
T* solve_Poisson_3D(const T* f, const RGrid &rgrid){
if(rgrid.dim != 3){
printf(" solve_Poisson_3D requires a 3D regular grid!\n");
return 0;
}
printf(" Solving 3D Poisson Eqn...");
fflush(stdout);
clock_t tic = clock();
// variables for showing progress
float percent_factor = 100.0f / (float)rgrid.sz;
float percent_unit = 0.1; // show progress at this percent work
uint progress_unit = std::max((uint)1, (uint) (percent_unit / percent_factor));
//printf(" %f %f %d\n", percent_factor, percent_unit, progress_unit);
/// -------------------------------------------------------------------
/// the 3D Green's function is ---- 1/(4pi) dist(x,x_0)
T Greens_factor = -0.25/M_PI;
/// output and temporary memory
T* phi = new T[rgrid.sz];
T* fd = new T[rgrid.sz];
/// -------------------------------------------------------------------
/// compute at v0
for(size_t v0 = 0; v0 < rgrid.sz; v0++){
/// populate the temporary field for the entire grid!
#pragma omp parallel for
for(size_t v = 0; v < rgrid.sz; v++){
// Harsh fixed this on 12/11/2016
// NOTE(review): with the #if 1 branch, dist is 0 at v == v0, so
// f[v]/dist is inf/NaN there; the disabled #else branch avoided this.
// Confirm the singular sample is intended to enter the integral.
#if 1
T dist = rgrid.get_dist(v, v0);
#else
/// don't want to compute 1 / 0
T dist = (v == v0) ? (0.01 * (rgrid.dx+rgrid.dy+rgrid.dz)) :
rgrid.get_dist(v, v0);
#endif
fd[v] = (f[v] / dist);
}
/// integrate
phi[v0] = Greens_factor * integ::trapezoidal_3D(fd, rgrid.X, rgrid.Y, rgrid.Z, rgrid.dx, rgrid.dy, rgrid.dz);
if(v0 % progress_unit == 0){
printf("\r Solving 3D Poisson Eqn... %.1f %%", (float)v0*percent_factor);
fflush(stdout);
}
}
delete []fd;
double sec = (double)(clock() - tic)/CLOCKS_PER_SEC;
if(sec < 1) printf("\r Solving 3D Poisson Eqn... Done! in %.3f msec.\n", 1000.0*sec);
else printf("\r Solving 3D Poisson Eqn... Done! in %.3f sec.\n", sec);
return phi;
}
/// ---------------------------------------------------------------------------
}
#endif
|
parallel_snake.c | #include "main.h"
#include <stdio.h>
#include <stdlib.h>
typedef struct cel Cell;
typedef struct coord Coord;
// Allocate one list cell holding grid position (i, j); prev/next start NULL.
// Exits the process on allocation failure, since no caller checks for NULL
// and a NULL return would be dereferenced immediately.
Cell* alocaCell(int i, int j) {
	Cell* cell = malloc(sizeof *cell);	// no cast needed in C; sizeof *cell tracks the type

	if (NULL == cell) {
		fprintf(stderr, "out of memory\n");
		exit(EXIT_FAILURE);
	}

	cell->prev = NULL;
	cell->next = NULL;
	cell->poz.line = i;
	cell->poz.col = j;

	return cell;
}
// Append cell `el` at the tail of the snake's doubly-linked body list.
void addCell(struct snake* snake, Cell* el) {
	if (NULL == snake->cap && NULL == snake->coada) {
		// empty list: el becomes both head (cap) and tail (coada)
		snake->cap = el;
		snake->coada = el;
		return;
	}

	el->prev = snake->coada;
	el->next = NULL;
	snake->coada->next = el;
	snake->coada = el;
}
// Move the tail cell to the front of the list: the old tail becomes the
// new head (the snake advances by recycling its freed tail cell as the
// head cell).  No-op for a one-cell snake.  Statement order matters here.
void attachHead(struct snake* snake) {
if(snake->cap == snake->coada)
return;
// link the old tail in front of the old head
snake->coada->next = snake->cap;
snake->cap->prev = snake->coada;
// the second-to-last cell becomes the new tail
snake->coada = snake->coada->prev;
snake->coada->next = NULL;
// the old tail (now cap->prev) becomes the new head
snake->cap = snake->cap->prev;
snake->cap->prev = NULL;
}
// Attempt one growth step of the body walk: move one cell from (*crti, *crtj)
// in direction `move` ('U'p/'D'own/'L'eft/'R'ight) with toroidal wrap-around.
// If the target cell carries the snake's encoding, append it to the list,
// advance the cursor, record the new prevDir, and return 1; otherwise return 0.
// prevDir codes match the original: up -> 'S', down -> 'N', left -> 'E',
// right -> 'V' (the direction we arrived from, excluded from the next search).
static int tryGrow(struct snake* snake, int **world, int num_lines, int num_cols,
		int *crti, int *crtj, char *prevDir, char move) {

	int ni = *crti;
	int nj = *crtj;
	char newPrev;

	switch (move) {
	case 'U':
		ni = (ni == 0) ? num_lines - 1 : ni - 1;
		newPrev = 'S';
		break;
	case 'D':
		ni = (ni == num_lines - 1) ? 0 : ni + 1;
		newPrev = 'N';
		break;
	case 'L':
		nj = (nj == 0) ? num_cols - 1 : nj - 1;
		newPrev = 'E';
		break;
	default: /* 'R' */
		nj = (nj == num_cols - 1) ? 0 : nj + 1;
		newPrev = 'V';
		break;
	}

	if (world[ni][nj] != snake->encoding)
		return 0;

	addCell(snake, alocaCell(ni, nj));
	*crti = ni;
	*crtj = nj;
	*prevDir = newPrev;

	return 1;
}

// Rebuild the snake's cell list by walking the world matrix from the head,
// following cells marked with the snake's encoding.  At each step the three
// directions other than the one we came from (prevDir) are probed in the
// same order as the original unrolled code; the walk stops when no neighbor
// continues the body (the tail has been reached).
void completeSnake(struct snake* snake, int **world, int num_lines, int num_cols) {
	int crti = snake->head.line;
	int crtj = snake->head.col;
	char prevDir = snake->direction;

	snake->cap = alocaCell(snake->head.line, snake->head.col);
	snake->coada = snake->cap;

	while (1) {
		// search order per prevDir (identical to the original unrolled logic):
		// N -> left, right, down;  S -> up, left, right;
		// E -> up, down, left;     V -> up, down, right
		const char *order;

		switch (prevDir) {
		case 'N': order = "LRD"; break;
		case 'S': order = "ULR"; break;
		case 'E': order = "UDL"; break;
		case 'V': order = "UDR"; break;
		default:  return; // unknown direction code: nothing to follow
		}

		int grown = 0;

		for (int k = 0; k < 3 && !grown; k++)
			grown = tryGrow(snake, world, num_lines, num_cols,
					&crti, &crtj, &prevDir, order[k]);

		if (!grown)
			break; // none of the three directions continues the body: tail reached
	}
}
// Nonzero when the snake's head has landed on an occupied world cell.
int checkCollision(struct snake* snake, int** world) {
	int line = snake->head.line;
	int col = snake->head.col;

	return world[line][col] != 0;
}
// Compute the snake's next position: free the tail cell in the world,
// recycle it as the head cell, and advance the head one step in the
// current direction with toroidal wrap-around.  The previous head and
// tail are saved so a collision can be rolled back by the caller.
void computeMoves(struct snake* snake, int** world, int num_lines, int num_cols) {
	snake->lastTail = snake->coada->poz;	// remember old tail for rollback
	world[snake->coada->poz.line][snake->coada->poz.col] = 0;	// tail cell vacates the map
	snake->oldHead = snake->head;		// remember old head for rollback

	// the freed tail cell becomes the new head cell of the list
	attachHead(snake);

	// advance the head, wrapping at the world's borders
	switch (snake->direction) {
	case 'N':
		snake->head.line = (snake->head.line == 0) ? num_lines - 1 : snake->head.line - 1;
		break;
	case 'S':
		snake->head.line = (snake->head.line == num_lines - 1) ? 0 : snake->head.line + 1;
		break;
	case 'V':
		snake->head.col = (snake->head.col == 0) ? num_cols - 1 : snake->head.col - 1;
		break;
	case 'E':
		snake->head.col = (snake->head.col == num_cols - 1) ? 0 : snake->head.col + 1;
		break;
	}

	// sync the list's head cell with the new coordinates
	snake->cap->poz.line = snake->head.line;
	snake->cap->poz.col = snake->head.col;
}
/* Run the simulation for step_count steps. Snakes move in lock-step; on the
 * first collision the board is rolled back to the previous step and the
 * simulation stops early.
 * NOTE(review): file_name is unused in this function -- presumably consumed
 * by a caller or a later output stage; confirm. */
void run_simulation(int num_lines, int num_cols, int **world, int num_snakes,
struct snake *snakes, int step_count, char *file_name) {
int ok = 0;
int i, j;
//completing the snakes is done in parallel, since each snake is independent
#pragma omp parallel for
for(i = 0; i < num_snakes; i++)
completeSnake(&snakes[i], world, num_lines, num_cols);
for(i = 0; i < step_count; i++) {
//computing the new positions, also in parallel, for the same reason
#pragma omp parallel for
for(j = 0; j < num_snakes; j++) {
computeMoves(&snakes[j], world, num_lines, num_cols);
}
//collision testing and head placement are done serially in the master thread
for(j = 0; j < num_snakes; j++) {
if(!checkCollision(&snakes[j], world)) {
world[snakes[j].head.line][snakes[j].head.col] = snakes[j].encoding;
} else {
ok = 1;
break;
}
}
//if a collision occurred, the map must be restored to the previous step
if(ok) {
//up to index j (the colliding snake), erase the heads already placed
//NOTE(review): the rollback loops reuse the outer step counter i; this is
//only safe because the step loop breaks immediately afterwards -- confirm
//before refactoring.
#pragma omp parallel for
for(i = 0; i < j; i++) {
world[snakes[i].head.line][snakes[i].head.col] = 0;
}
//for all snakes restore the previous head position and re-place the tail
#pragma omp parallel for
for(i = 0; i < num_snakes; i++) {
snakes[i].head = snakes[i].oldHead;
world[snakes[i].lastTail.line][snakes[i].lastTail.col] = snakes[i].encoding;
}
//after a collision the simulation ends
break;
}
}
} |
omp-hello.c | /*
============================================================================
Name : omp-hello.c
Author :
Version :
Copyright : Your copyright notice
Description : Hello OpenMP World in C
============================================================================
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/**
* Hello OpenMP World prints the number of threads and the current thread id
*/
/**
 * Hello OpenMP World: every thread prints its id; the master thread
 * (id 0) additionally reports the size of the thread team.
 */
int main (int argc, char *argv[]) {
    /* Create a team of threads; variables declared inside the region are
     * automatically private to each thread. */
    #pragma omp parallel
    {
        int thread_id = omp_get_thread_num();
        printf("Hello World from thread number %d\n", thread_id);
        /* Only the master thread reports the team size. */
        if (thread_id == 0)
        {
            int team_size = omp_get_num_threads();
            printf("Number of threads is %d\n", team_size);
        }
    }
    return 0;
}
|
bitshuffle_core.c | /*
* Bitshuffle - Filter for improving compression of typed binary data.
*
* Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
* Website: http://www.github.com/kiyo-masui/bitshuffle
* Created: 2014
*
* See LICENSE file for details about copyright and rights to use.
*
*/
#include "bitshuffle_core.h"
#include "bitshuffle_internals.h"
#include <stdio.h>
#include <string.h>
#if defined(__AVX2__) && defined (__SSE2__)
#define USEAVX2
#endif
#if defined(__SSE2__)
#define USESSE2
#endif
// Conditional includes for SSE2 and AVX2.
#ifdef USEAVX2
#include <immintrin.h>
#elif defined USESSE2
#include <emmintrin.h>
#endif
// Macros.
// Bail out with error code -80 when n is not a multiple of eight (the
// bit-shuffling kernels operate on 8-byte groups). NOTE(review): expands to a
// bare `if` with a hidden `return` -- only usable inside int-returning
// functions, and braces matter at call sites.
#define CHECK_MULT_EIGHT(n) if (n % 8) return -80;
// Classic max macro; arguments are evaluated twice, so avoid side effects
// (e.g. MAX(i++, j)).
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
/* ---- Functions indicating compile time instruction set. ---- */
/* Report whether the library was compiled with SSE2 kernels enabled. */
int bshuf_using_SSE2(void) {
#ifdef USESSE2
    const int compiled_with_sse2 = 1;
#else
    const int compiled_with_sse2 = 0;
#endif
    return compiled_with_sse2;
}
/* Report whether the library was compiled with AVX2 kernels enabled. */
int bshuf_using_AVX2(void) {
#ifdef USEAVX2
    const int compiled_with_avx2 = 1;
#else
    const int compiled_with_avx2 = 0;
#endif
    return compiled_with_avx2;
}
/* ---- Worker code not requiring special instruction sets. ----
*
* The following code does not use any x86 specific vectorized instructions
* and should compile on any machine
*
*/
/* Transpose 8x8 bit array packed into a single quadword *x*.
* *t* is workspace. */
/* Three delta-swap rounds exchange 1-, 2-, then 4-bit blocks across the main
 * diagonal (the standard 8x8 bit-matrix transpose, cf. Hacker's Delight
 * section 7-3). Both x and t must be 64-bit unsigned lvalues. */
#define TRANS_BIT_8X8(x, t) { \
t = (x ^ (x >> 7)) & 0x00AA00AA00AA00AALL; \
x = x ^ t ^ (t << 7); \
t = (x ^ (x >> 14)) & 0x0000CCCC0000CCCCLL; \
x = x ^ t ^ (t << 14); \
t = (x ^ (x >> 28)) & 0x00000000F0F0F0F0LL; \
x = x ^ t ^ (t << 28); \
}
/* Transpose 8x8 bit array along the diagonal from upper right
to lower left */
/* Same delta-swap scheme as TRANS_BIT_8X8 but with shifts of 9/18/36 and
 * complementary masks, transposing across the anti-diagonal; used on
 * big-endian hosts where byte order within the quadword is reversed. */
#define TRANS_BIT_8X8_BE(x, t) { \
t = (x ^ (x >> 9)) & 0x0055005500550055LL; \
x = x ^ t ^ (t << 9); \
t = (x ^ (x >> 18)) & 0x0000333300003333LL; \
x = x ^ t ^ (t << 18); \
t = (x ^ (x >> 36)) & 0x000000000F0F0F0FLL; \
x = x ^ t ^ (t << 36); \
}
/* Transpose of an array of arbitrarily typed elements. */
/* Writes out[jj*lda + ii] = in[ii*ldb + jj] for an lda-by-ldb matrix of
 * type_t elements. The main loop processes rows in blocks of 8 so the
 * innermost loop can be unrolled; the second loop handles the lda % 8 tail. */
#define TRANS_ELEM_TYPE(in, out, lda, ldb, type_t) { \
size_t ii, jj, kk; \
const type_t* in_type = (const type_t*) in; \
type_t* out_type = (type_t*) out; \
for(ii = 0; ii + 7 < lda; ii += 8) { \
for(jj = 0; jj < ldb; jj++) { \
for(kk = 0; kk < 8; kk++) { \
out_type[jj*lda + ii + kk] = \
in_type[ii*ldb + kk * ldb + jj]; \
} \
} \
} \
for(ii = lda - lda % 8; ii < lda; ii ++) { \
for(jj = 0; jj < ldb; jj++) { \
out_type[jj*lda + ii] = in_type[ii*ldb + jj]; \
} \
} \
}
/* Memory copy with the bshuf call signature; used for testing and
 * profiling. Copies size * elem_size bytes and returns that count. */
int64_t bshuf_copy(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    const size_t nbyte = size * elem_size;
    memcpy(out, in, nbyte);
    return (int64_t) nbyte;
}
/* Transpose bytes within elements, starting partway through the input:
 * for every element index i in [start, size) and byte index j within the
 * element, writes out[j*size + i] = in[i*elem_size + j].
 * start must be a multiple of 8; returns size * elem_size, or -80 if not. */
int64_t bshuf_trans_byte_elem_remainder(const void* in, void* out, const size_t size,
        const size_t elem_size, const size_t start) {
    const char* src = (const char*) in;
    char* dst = (char*) out;

    if (start % 8) return -80;

    if (size > start) {
        size_t i, j, k;
        /* Bulk of the elements in blocks of 8 so the innermost loop can be
         * unrolled by the compiler. */
        for (i = start; i + 7 < size; i += 8) {
            for (j = 0; j < elem_size; j++) {
                for (k = 0; k < 8; k++) {
                    dst[j * size + i + k] = src[(i + k) * elem_size + j];
                }
            }
        }
        /* Tail: the last size % 8 elements. */
        for (i = size - size % 8; i < size; i++) {
            for (j = 0; j < elem_size; j++) {
                dst[j * size + i] = src[i * elem_size + j];
            }
        }
    }
    return size * elem_size;
}
/* Transpose bytes within elements over the whole buffer (scalar path). */
int64_t bshuf_trans_byte_elem_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    /* Whole-buffer case is just the remainder routine starting at zero. */
    const size_t start = 0;
    return bshuf_trans_byte_elem_remainder(in, out, size, elem_size, start);
}
/* Transpose bits within bytes. */
/* Each input byte contributes one bit to each of the 8 output "bit rows" of
 * nbyte/8 bytes; the input is consumed one 64-bit word (8 bytes) at a time. */
int64_t bshuf_trans_bit_byte_remainder(const void* in, void* out, const size_t size,
const size_t elem_size, const size_t start_byte) {
const uint64_t* in_b = (const uint64_t*) in;
uint8_t* out_b = (uint8_t*) out;
uint64_t x, t;
size_t ii, kk;
size_t nbyte = elem_size * size;
size_t nbyte_bitrow = nbyte / 8;
// Runtime endianness probe: first byte of a 64-bit 1 is 1 on little endian.
uint64_t e=1;
const int little_endian = *(uint8_t *) &e == 1;
// On big-endian hosts the rows are walked backwards: bit_row_skip relies on
// well-defined unsigned wraparound of -nbyte_bitrow, and bit_row_offset
// re-biases the index so it starts at row 7.
const size_t bit_row_skip = little_endian ? nbyte_bitrow : -nbyte_bitrow;
const int64_t bit_row_offset = little_endian ? 0 : 7 * nbyte_bitrow;
CHECK_MULT_EIGHT(nbyte);
CHECK_MULT_EIGHT(start_byte);
for (ii = start_byte / 8; ii < nbyte_bitrow; ii ++) {
x = in_b[ii];
if (little_endian) {
TRANS_BIT_8X8(x, t);
} else {
TRANS_BIT_8X8_BE(x, t);
}
// Scatter the 8 transposed bytes, one per bit row.
for (kk = 0; kk < 8; kk ++) {
out_b[bit_row_offset + kk * bit_row_skip + ii] = x;
x = x >> 8;
}
}
return size * elem_size;
}
/* Transpose bits within bytes over the whole buffer (scalar path). */
int64_t bshuf_trans_bit_byte_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    /* Delegate to the remainder routine, starting at byte zero. */
    return bshuf_trans_bit_byte_remainder(in, out, size, elem_size, (size_t) 0);
}
/* General transpose of an lda-by-ldb matrix of elem_size-byte elements,
 * optimized for large element sizes: out[j][i] = in[i][j], element-wise.
 * Returns the number of bytes processed. */
int64_t bshuf_trans_elem(const void* in, void* out, const size_t lda,
        const size_t ldb, const size_t elem_size) {
    const char* src = (const char*) in;
    char* dst = (char*) out;
    size_t row, col;
    for (row = 0; row < lda; row++) {
        for (col = 0; col < ldb; col++) {
            const size_t src_off = (row * ldb + col) * elem_size;
            const size_t dst_off = (col * lda + row) * elem_size;
            memcpy(&dst[dst_off], &src[src_off], elem_size);
        }
    }
    return lda * ldb * elem_size;
}
/* Transpose rows of shuffled bits (size / 8 bytes each) within groups of 8.
 * size must be a multiple of eight; returns -80 otherwise. */
int64_t bshuf_trans_bitrow_eight(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    if (size % 8) return -80;
    const size_t nbyte_bitrow = size / 8;
    /* An 8 x elem_size transpose of nbyte_bitrow-byte "elements". */
    return bshuf_trans_elem(in, out, 8, elem_size, nbyte_bitrow);
}
/* Transpose bits within elements, scalar pipeline:
 * byte transpose -> bit transpose -> bitrow regroup.
 * Returns size * elem_size on success, a negative error code otherwise. */
int64_t bshuf_trans_bit_elem_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    int64_t rc;
    void *scratch;

    if (size % 8) return -80;

    scratch = malloc(size * elem_size);
    if (scratch == NULL) return -1;

    rc = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
    CHECK_ERR_FREE(rc, scratch);
    rc = bshuf_trans_bit_byte_scal(out, scratch, size, elem_size);
    CHECK_ERR_FREE(rc, scratch);
    rc = bshuf_trans_bitrow_eight(scratch, out, size, elem_size);

    free(scratch);
    return rc;
}
/* For data organized into a row for each bit (8 * elem_size rows of
 * size/8 bytes), transpose the bytes:
 * out[col*8*elem_size + row*8 + k] = in[(row*8 + k)*nbyte_row + col].
 * size must be a multiple of eight; returns -80 otherwise. */
int64_t bshuf_trans_byte_bitrow_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    const char *src = (const char*) in;
    char *dst = (char*) out;
    const size_t nbyte_row = size / 8;
    size_t row, col, k;

    if (size % 8) return -80;

    for (row = 0; row < elem_size; row++) {
        for (col = 0; col < nbyte_row; col++) {
            for (k = 0; k < 8; k++) {
                dst[col * 8 * elem_size + row * 8 + k] =
                        src[(row * 8 + k) * nbyte_row + col];
            }
        }
    }
    return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
/* Inverse-direction bit shuffle: reads one 64-bit word per (block, byte-lane)
 * pair, transposes its 8x8 bit matrix, and scatters the bytes back across the
 * eight elements of the block. */
int64_t bshuf_shuffle_bit_eightelem_scal(const void* in, void* out, \
const size_t size, const size_t elem_size) {
const char *in_b;
char *out_b;
uint64_t x, t;
size_t ii, jj, kk;
size_t nbyte, out_index;
// Runtime endianness probe.
uint64_t e=1;
const int little_endian = *(uint8_t *) &e == 1;
// Big-endian hosts walk elements backwards: elem_skip relies on well-defined
// unsigned wraparound of -elem_size, with elem_offset re-biasing to lane 7.
const size_t elem_skip = little_endian ? elem_size : -elem_size;
const uint64_t elem_offset = little_endian ? 0 : 7 * elem_size;
CHECK_MULT_EIGHT(size);
in_b = (const char*) in;
out_b = (char*) out;
nbyte = elem_size * size;
for (jj = 0; jj < 8 * elem_size; jj += 8) {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte; ii += 8 * elem_size) {
// NOTE(review): potentially unaligned uint64_t load through a cast
// pointer; relies on the platform tolerating it -- confirm for new targets.
x = *((uint64_t*) &in_b[ii + jj]);
if (little_endian) {
TRANS_BIT_8X8(x, t);
} else {
TRANS_BIT_8X8_BE(x, t);
}
// Scatter the 8 transposed bytes, one per element in the block.
for (kk = 0; kk < 8; kk++) {
out_index = ii + jj / 8 + elem_offset + kk * elem_skip;
*((uint8_t*) &out_b[out_index]) = x;
x = x >> 8;
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements, scalar pipeline (inverse of
 * bshuf_trans_bit_elem_scal): bitrow byte transpose, then per-block bit
 * unshuffle. Returns byte count on success, negative error code otherwise. */
int64_t bshuf_untrans_bit_elem_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    int64_t rc;
    void *scratch;

    if (size % 8) return -80;

    scratch = malloc(size * elem_size);
    if (scratch == NULL) return -1;

    rc = bshuf_trans_byte_bitrow_scal(in, scratch, size, elem_size);
    CHECK_ERR_FREE(rc, scratch);
    rc = bshuf_shuffle_bit_eightelem_scal(scratch, out, size, elem_size);

    free(scratch);
    return rc;
}
/* ---- Worker code that uses SSE2 ----
*
* The following code makes use of the SSE2 instruction set and specialized
* 16 byte registers. The SSE2 instructions are present on modern x86
* processors. The first Intel processor microarchitecture supporting SSE2 was
* Pentium 4 (2000).
*
*/
#ifdef USESSE2
/* Transpose bytes within elements for 16 bit elements. */
/* Four rounds of byte interleaves (a perfect-shuffle network) separate the
 * low and high bytes of 16 consecutive 2-byte elements into two 16-byte
 * output rows; the size % 16 tail falls back to the scalar remainder. */
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b = (const char*) in;
char *out_b = (char*) out;
__m128i a0, b0, a1, b1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 1*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 2,
size - size % 16);
}
/* Transpose bytes within elements for 32 bit elements. */
/* Three rounds of byte interleaves followed by a 64-bit regroup split 16
 * consecutive 4-byte elements into four byte-plane output rows; the
 * size % 16 tail falls back to the scalar remainder. */
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b;
char *out_b;
in_b = (const char*) in;
out_b = (char*) out;
__m128i a0, b0, c0, d0, a1, b1, c1, d1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 1*16]);
c0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 2*16]);
d0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 3*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
c0 = _mm_unpacklo_epi8(c1, d1);
d0 = _mm_unpackhi_epi8(c1, d1);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
// Final regroup: gather matching 64-bit halves into byte-plane rows.
a0 = _mm_unpacklo_epi64(a1, c1);
b0 = _mm_unpackhi_epi64(a1, c1);
c0 = _mm_unpacklo_epi64(b1, d1);
d0 = _mm_unpackhi_epi64(b1, d1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
_mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 4,
size - size % 16);
}
/* Transpose bytes within elements for 64 bit elements. */
/* Two rounds of byte interleaves, a 32-bit regroup and a 64-bit regroup
 * split 16 consecutive 8-byte elements into eight byte-plane output rows;
 * the size % 16 tail falls back to the scalar remainder. */
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
size_t ii;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
__m128i a0, b0, c0, d0, e0, f0, g0, h0;
__m128i a1, b1, c1, d1, e1, f1, g1, h1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 1*16]);
c0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 2*16]);
d0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 3*16]);
e0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 4*16]);
f0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 5*16]);
g0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 6*16]);
h0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 7*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
e1 = _mm_unpacklo_epi8(e0, f0);
f1 = _mm_unpackhi_epi8(e0, f0);
g1 = _mm_unpacklo_epi8(g0, h0);
h1 = _mm_unpackhi_epi8(g0, h0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
c0 = _mm_unpacklo_epi8(c1, d1);
d0 = _mm_unpackhi_epi8(c1, d1);
e0 = _mm_unpacklo_epi8(e1, f1);
f0 = _mm_unpackhi_epi8(e1, f1);
g0 = _mm_unpacklo_epi8(g1, h1);
h0 = _mm_unpackhi_epi8(g1, h1);
// 32-bit regroup across vector pairs.
a1 = _mm_unpacklo_epi32(a0, c0);
b1 = _mm_unpackhi_epi32(a0, c0);
c1 = _mm_unpacklo_epi32(b0, d0);
d1 = _mm_unpackhi_epi32(b0, d0);
e1 = _mm_unpacklo_epi32(e0, g0);
f1 = _mm_unpackhi_epi32(e0, g0);
g1 = _mm_unpacklo_epi32(f0, h0);
h1 = _mm_unpackhi_epi32(f0, h0);
// Final 64-bit regroup yields one full byte-plane per vector.
a0 = _mm_unpacklo_epi64(a1, e1);
b0 = _mm_unpackhi_epi64(a1, e1);
c0 = _mm_unpacklo_epi64(b1, f1);
d0 = _mm_unpackhi_epi64(b1, f1);
e0 = _mm_unpacklo_epi64(c1, g1);
f0 = _mm_unpackhi_epi64(c1, g1);
g0 = _mm_unpacklo_epi64(d1, h1);
h0 = _mm_unpackhi_epi64(d1, h1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
_mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
_mm_storeu_si128((__m128i *) &out_b[4*size + ii], e0);
_mm_storeu_si128((__m128i *) &out_b[5*size + ii], f0);
_mm_storeu_si128((__m128i *) &out_b[6*size + ii], g0);
_mm_storeu_si128((__m128i *) &out_b[7*size + ii], h0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 8,
size - size % 16);
}
/* Transpose bytes within elements using best SSE algorithm available. */
/* Dispatcher: power-of-2 element sizes get a dedicated SSE kernel; odd (and
 * odd*2) sizes use the scalar kernel; remaining multiples of 4 are transposed
 * hierarchically in chunks of int64/int32/int16 via a temporary buffer.
 * Returns byte count on success, -1 on allocation failure. */
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
// Trivial cases: power of 2 bytes.
switch (elem_size) {
case 1:
count = bshuf_copy(in, out, size, elem_size);
return count;
case 2:
count = bshuf_trans_byte_elem_SSE_16(in, out, size);
return count;
case 4:
count = bshuf_trans_byte_elem_SSE_32(in, out, size);
return count;
case 8:
count = bshuf_trans_byte_elem_SSE_64(in, out, size);
return count;
}
// Worst case: odd number of bytes. Turns out that this is faster for
// (odd * 2) byte elements as well (hence % 4).
if (elem_size % 4) {
count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
return count;
}
// Multiple of power of 2: transpose hierarchically.
{
size_t nchunk_elem;
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
if ((elem_size % 8) == 0) {
nchunk_elem = elem_size / 8;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
count = bshuf_trans_byte_elem_SSE_64(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
} else if ((elem_size % 4) == 0) {
nchunk_elem = elem_size / 4;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
count = bshuf_trans_byte_elem_SSE_32(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
} else {
// Not used since scalar algorithm is faster.
nchunk_elem = elem_size / 2;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
count = bshuf_trans_byte_elem_SSE_16(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
}
free(tmp_buf);
return count;
}
}
/* Transpose bits within bytes. */
/* Processes 16 bytes per iteration: _mm_movemask_epi8 gathers bit 7 of every
 * byte into a 16-bit mask, then a left shift by 1 exposes the next bit, so
 * eight rounds emit all eight bit planes (most significant first via 7-kk).
 * The nbyte % 16 tail falls back to the scalar remainder routine. */
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
uint16_t* out_ui16;
int64_t count;
size_t nbyte = elem_size * size;
CHECK_MULT_EIGHT(nbyte);
__m128i xmm;
int32_t bt;
for (ii = 0; ii + 15 < nbyte; ii += 16) {
xmm = _mm_loadu_si128((__m128i *) &in_b[ii]);
for (kk = 0; kk < 8; kk++) {
bt = _mm_movemask_epi8(xmm);
xmm = _mm_slli_epi16(xmm, 1);
out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_ui16 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 16);
return count;
}
/* Transpose bits within elements, SSE2 pipeline:
 * byte transpose -> bit transpose -> bitrow regroup.
 * Returns byte count on success, negative error code otherwise. */
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    int64_t rc;
    void* scratch;

    if (size % 8) return -80;

    scratch = malloc(size * elem_size);
    if (scratch == NULL) return -1;

    rc = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
    CHECK_ERR_FREE(rc, scratch);
    rc = bshuf_trans_bit_byte_SSE(out, scratch, size, elem_size);
    CHECK_ERR_FREE(rc, scratch);
    rc = bshuf_trans_bitrow_eight(scratch, out, size, elem_size);

    free(scratch);
    return rc;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
/* SSE2 8x16 byte-block transpose: three interleave rounds build 8-byte
 * columns which are written out with storel/storeh pairs; leftover columns
 * (nbyte_row % 16) are handled by the scalar loop at the bottom. */
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, jj;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
__m128i a0, b0, c0, d0, e0, f0, g0, h0;
__m128i a1, b1, c1, d1, e1, f1, g1, h1;
__m128 *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;
for (ii = 0; ii + 7 < nrows; ii += 8) {
for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 0)*nbyte_row + jj]);
b0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 1)*nbyte_row + jj]);
c0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 2)*nbyte_row + jj]);
d0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 3)*nbyte_row + jj]);
e0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 4)*nbyte_row + jj]);
f0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 5)*nbyte_row + jj]);
g0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 6)*nbyte_row + jj]);
h0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 7)*nbyte_row + jj]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpacklo_epi8(c0, d0);
c1 = _mm_unpacklo_epi8(e0, f0);
d1 = _mm_unpacklo_epi8(g0, h0);
e1 = _mm_unpackhi_epi8(a0, b0);
f1 = _mm_unpackhi_epi8(c0, d0);
g1 = _mm_unpackhi_epi8(e0, f0);
h1 = _mm_unpackhi_epi8(g0, h0);
a0 = _mm_unpacklo_epi16(a1, b1);
b0 = _mm_unpacklo_epi16(c1, d1);
c0 = _mm_unpackhi_epi16(a1, b1);
d0 = _mm_unpackhi_epi16(c1, d1);
e0 = _mm_unpacklo_epi16(e1, f1);
f0 = _mm_unpacklo_epi16(g1, h1);
g0 = _mm_unpackhi_epi16(e1, f1);
h0 = _mm_unpackhi_epi16(g1, h1);
a1 = _mm_unpacklo_epi32(a0, b0);
b1 = _mm_unpackhi_epi32(a0, b0);
c1 = _mm_unpacklo_epi32(c0, d0);
d1 = _mm_unpackhi_epi32(c0, d0);
e1 = _mm_unpacklo_epi32(e0, f0);
f1 = _mm_unpackhi_epi32(e0, f0);
g1 = _mm_unpacklo_epi32(g0, h0);
h1 = _mm_unpackhi_epi32(g0, h0);
// We don't have a storeh instruction for integers, so interpret
// as a float. Have a storel (_mm_storel_epi64).
as = (__m128 *) &a1;
bs = (__m128 *) &b1;
cs = (__m128 *) &c1;
ds = (__m128 *) &d1;
es = (__m128 *) &e1;
fs = (__m128 *) &f1;
gs = (__m128 *) &g1;
hs = (__m128 *) &h1;
_mm_storel_pi((__m64 *) &out_b[(jj + 0) * nrows + ii], *as);
_mm_storel_pi((__m64 *) &out_b[(jj + 2) * nrows + ii], *bs);
_mm_storel_pi((__m64 *) &out_b[(jj + 4) * nrows + ii], *cs);
_mm_storel_pi((__m64 *) &out_b[(jj + 6) * nrows + ii], *ds);
_mm_storel_pi((__m64 *) &out_b[(jj + 8) * nrows + ii], *es);
_mm_storel_pi((__m64 *) &out_b[(jj + 10) * nrows + ii], *fs);
_mm_storel_pi((__m64 *) &out_b[(jj + 12) * nrows + ii], *gs);
_mm_storel_pi((__m64 *) &out_b[(jj + 14) * nrows + ii], *hs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 1) * nrows + ii], *as);
_mm_storeh_pi((__m64 *) &out_b[(jj + 3) * nrows + ii], *bs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 5) * nrows + ii], *cs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 7) * nrows + ii], *ds);
_mm_storeh_pi((__m64 *) &out_b[(jj + 9) * nrows + ii], *es);
_mm_storeh_pi((__m64 *) &out_b[(jj + 11) * nrows + ii], *fs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 13) * nrows + ii], *gs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 15) * nrows + ii], *hs);
}
// Scalar tail for the trailing nbyte_row % 16 columns.
for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks (SSE2).
 * Processes 16-byte lanes with movemask/shift to rebuild byte values from
 * bit planes. Odd element sizes cannot use the 16-byte path and are
 * delegated to the scalar kernel.
 * Returns size * elem_size on success, or a negative error code. */
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    CHECK_MULT_EIGHT(size);

    // With a bit of care, this could be written such that such that it is
    // in_buf = out_buf safe.
    const char* in_b = (const char*) in;
    uint16_t* out_ui16 = (uint16_t*) out;

    size_t ii, jj, kk;
    size_t nbyte = elem_size * size;

    __m128i xmm;
    int32_t bt;

    if (elem_size % 2) {
        /* Propagate the scalar kernel's result instead of discarding it
         * (the previous code ignored the return value, silently swallowing
         * any error code; the AVX variant already propagates it). */
        return bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
    }

    for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
            ii += 8 * elem_size) {
        for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
            xmm = _mm_loadu_si128((__m128i *) &in_b[ii + jj]);
            for (kk = 0; kk < 8; kk++) {
                /* movemask gathers bit 7 of each byte into a 16-bit plane;
                 * shifting left exposes the next plane. */
                bt = _mm_movemask_epi8(xmm);
                xmm = _mm_slli_epi16(xmm, 1);
                size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
                out_ui16[ind / 2] = bt;
            }
        }
    }
    return size * elem_size;
}
/* Untranspose bits within elements, SSE2 pipeline (inverse of
 * bshuf_trans_bit_elem_SSE): bitrow byte transpose, then per-block bit
 * unshuffle. Returns byte count on success, negative error code otherwise. */
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    int64_t rc;
    void* scratch;

    if (size % 8) return -80;

    scratch = malloc(size * elem_size);
    if (scratch == NULL) return -1;

    rc = bshuf_trans_byte_bitrow_SSE(in, scratch, size, elem_size);
    CHECK_ERR_FREE(rc, scratch);
    rc = bshuf_shuffle_bit_eightelem_SSE(scratch, out, size, elem_size);

    free(scratch);
    return rc;
}
#else // #ifdef USESSE2
/* Stub implementations used when the library is built without SSE2: every
 * entry point returns error code -11 ("SSE2 not available"). */
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
#endif // #ifdef USESSE2
/* ---- Code that requires AVX2. Intel Haswell (2013) and later. ---- */
/* ---- Worker code that uses AVX2 ----
*
* The following code makes use of the AVX2 instruction set and specialized
* 32 byte registers. The AVX2 instructions are present on newer x86
* processors. The first Intel processor microarchitecture supporting AVX2 was
* Haswell (2013).
*
*/
#ifdef USEAVX2
/* Transpose bits within bytes. */
/* AVX2 version of the movemask/shift bit-plane extraction: 32 input bytes
 * per iteration, one 32-bit plane word per round (most significant plane
 * first via 7-kk); the nbyte % 32 tail uses the scalar remainder routine. */
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
int32_t* out_i32;
size_t nbyte = elem_size * size;
int64_t count;
__m256i ymm;
int32_t bt;
for (ii = 0; ii + 31 < nbyte; ii += 32) {
ymm = _mm256_loadu_si256((__m256i *) &in_b[ii]);
for (kk = 0; kk < 8; kk++) {
bt = _mm256_movemask_epi8(ymm);
ymm = _mm256_slli_epi16(ymm, 1);
out_i32 = (int32_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_i32 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 32);
return count;
}
/* Transpose bits within elements, AVX2 pipeline:
 * byte transpose (SSE kernel) -> AVX bit transpose -> bitrow regroup.
 * Returns byte count on success, negative error code otherwise. */
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    int64_t rc;
    void* scratch;

    if (size % 8) return -80;

    scratch = malloc(size * elem_size);
    if (scratch == NULL) return -1;

    rc = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
    CHECK_ERR_FREE(rc, scratch);
    rc = bshuf_trans_bit_byte_AVX(out, scratch, size, elem_size);
    CHECK_ERR_FREE(rc, scratch);
    rc = bshuf_trans_bitrow_eight(scratch, out, size, elem_size);

    free(scratch);
    return rc;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
/* AVX2 32-column byte-block transpose: per 4-element group, four 8-row loads
 * are interleaved through epi8/epi16/epi32 rounds, buffered in ymm_storeage,
 * then combined with epi64 unpacks and cross-lane permutes before storing.
 * Element sizes not divisible by 4 fall back to the SSE kernel; leftover
 * columns (nbyte_row % 32) are handled by the scalar loop at the bottom. */
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t hh, ii, jj, kk, mm;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
if (elem_size % 4) return bshuf_trans_byte_bitrow_SSE(in, out, size,
elem_size);
__m256i ymm_0[8];
__m256i ymm_1[8];
__m256i ymm_storeage[8][4];
for (jj = 0; jj + 31 < nbyte_row; jj += 32) {
for (ii = 0; ii + 3 < elem_size; ii += 4) {
for (hh = 0; hh < 4; hh ++) {
// Load 8 bit rows of this element into ymm_0.
for (kk = 0; kk < 8; kk ++){
ymm_0[kk] = _mm256_loadu_si256((__m256i *) &in_b[
(ii * 8 + hh * 8 + kk) * nbyte_row + jj]);
}
for (kk = 0; kk < 4; kk ++){
ymm_1[kk] = _mm256_unpacklo_epi8(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
ymm_1[kk + 4] = _mm256_unpackhi_epi8(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
}
for (kk = 0; kk < 2; kk ++){
for (mm = 0; mm < 2; mm ++){
ymm_0[kk * 4 + mm] = _mm256_unpacklo_epi16(
ymm_1[kk * 4 + mm * 2],
ymm_1[kk * 4 + mm * 2 + 1]);
ymm_0[kk * 4 + mm + 2] = _mm256_unpackhi_epi16(
ymm_1[kk * 4 + mm * 2],
ymm_1[kk * 4 + mm * 2 + 1]);
}
}
for (kk = 0; kk < 4; kk ++){
ymm_1[kk * 2] = _mm256_unpacklo_epi32(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
ymm_1[kk * 2 + 1] = _mm256_unpackhi_epi32(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
}
for (kk = 0; kk < 8; kk ++){
ymm_storeage[kk][hh] = ymm_1[kk];
}
}
// Combine the four buffered quarters and store 32-byte output rows.
for (mm = 0; mm < 8; mm ++) {
for (kk = 0; kk < 4; kk ++){
ymm_0[kk] = ymm_storeage[mm][kk];
}
ymm_1[0] = _mm256_unpacklo_epi64(ymm_0[0], ymm_0[1]);
ymm_1[1] = _mm256_unpacklo_epi64(ymm_0[2], ymm_0[3]);
ymm_1[2] = _mm256_unpackhi_epi64(ymm_0[0], ymm_0[1]);
ymm_1[3] = _mm256_unpackhi_epi64(ymm_0[2], ymm_0[3]);
// Cross-lane fixup: unpack* works within 128-bit lanes only.
ymm_0[0] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 32);
ymm_0[1] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 32);
ymm_0[2] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 49);
ymm_0[3] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 49);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 0 * 16) * nrows + ii * 8], ymm_0[0]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 0 * 16 + 1) * nrows + ii * 8], ymm_0[1]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 1 * 16) * nrows + ii * 8], ymm_0[2]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 1 * 16 + 1) * nrows + ii * 8], ymm_0[3]);
}
}
}
// Scalar tail for the trailing nbyte_row % 32 columns.
for (ii = 0; ii < nrows; ii ++ ) {
for (jj = nbyte_row - nbyte_row % 32; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii] = in_b[ii * nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
/* AVX2 version: rebuilds bytes from bit planes using 32-byte
 * movemask/shift rounds; element sizes not divisible by 4 fall back
 * to the SSE kernel (whose result is propagated). */
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
// With a bit of care, this could be written such that such that it is
// in_buf = out_buf safe.
const char* in_b = (const char*) in;
char* out_b = (char*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
__m256i ymm;
int32_t bt;
if (elem_size % 4) {
return bshuf_shuffle_bit_eightelem_SSE(in, out, size, elem_size);
} else {
for (jj = 0; jj + 31 < 8 * elem_size; jj += 32) {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
ymm = _mm256_loadu_si256((__m256i *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
// movemask gathers bit 7 of each byte -> one 32-bit plane.
bt = _mm256_movemask_epi8(ymm);
ymm = _mm256_slli_epi16(ymm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
* (int32_t *) &out_b[ind] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements, AVX2 pipeline (inverse of
 * bshuf_trans_bit_elem_AVX): bitrow byte transpose, then per-block bit
 * unshuffle. Returns byte count on success, negative error code otherwise. */
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    int64_t rc;
    void* scratch;

    if (size % 8) return -80;

    scratch = malloc(size * elem_size);
    if (scratch == NULL) return -1;

    rc = bshuf_trans_byte_bitrow_AVX(in, scratch, size, elem_size);
    CHECK_ERR_FREE(rc, scratch);
    rc = bshuf_shuffle_bit_eightelem_AVX(scratch, out, size, elem_size);

    free(scratch);
    return rc;
}
#else // #ifdef USEAVX2
/* Stub implementations used when the library is built without AVX2: every
 * entry point returns error code -12 ("AVX2 not available"). */
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
#endif // #ifdef USEAVX2
/* ---- Drivers selecting best instruction set at compile time. ---- */
/* Transpose bits within elements using the best kernel selected at
 * compile time (AVX2 > SSE2 > scalar). */
int64_t bshuf_trans_bit_elem(const void* in, void* out, const size_t size,
        const size_t elem_size) {
#ifdef USEAVX2
    return bshuf_trans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
    return bshuf_trans_bit_elem_SSE(in, out, size, elem_size);
#else
    return bshuf_trans_bit_elem_scal(in, out, size, elem_size);
#endif
}
/* Untranspose bits within elements using the best kernel selected at
 * compile time (AVX2 > SSE2 > scalar). */
int64_t bshuf_untrans_bit_elem(const void* in, void* out, const size_t size,
        const size_t elem_size) {
#ifdef USEAVX2
    return bshuf_untrans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
    return bshuf_untrans_bit_elem_SSE(in, out, size, elem_size);
#else
    return bshuf_untrans_bit_elem_scal(in, out, size, elem_size);
#endif
}
/* ---- Wrappers for implementing blocking ---- */
/* Wrap a function for processing a single block to process an entire buffer in
* parallel. */
/* Splits the buffer into block_size-element blocks, runs `fun` on each
 * (optionally in parallel via OpenMP, with the ioc_chain handing out input/
 * output ranges), processes the last partial block, then memcpy's the final
 * sub-BSHUF_BLOCKED_MULT leftover bytes verbatim.
 * Returns the cumulative count, or the (last) negative error code. */
int64_t bshuf_blocked_wrap_fun(bshufBlockFunDef fun, const void* in, void* out, \
const size_t size, const size_t elem_size, size_t block_size) {
size_t ii;
int64_t err = 0;
int64_t count, cum_count=0;
size_t last_block_size;
size_t leftover_bytes;
size_t this_iter;
char *last_in;
char *last_out;
ioc_chain C;
ioc_init(&C, in, out);
if (block_size == 0) {
block_size = bshuf_default_block_size(elem_size);
}
if (block_size % BSHUF_BLOCKED_MULT) return -81;
// NOTE(review): `err` is assigned inside the parallel for without any
// atomic/critical protection; concurrent failing blocks race on it and only
// one error code survives -- confirm this "any error wins" behavior is
// intended.
#if defined(_OPENMP)
#pragma omp parallel for schedule(dynamic, 1) \
private(count) reduction(+ : cum_count)
#endif
for (ii = 0; ii < size / block_size; ii ++) {
count = fun(&C, block_size, elem_size);
if (count < 0) err = count;
cum_count += count;
}
// Handle the last partial block (rounded down to BSHUF_BLOCKED_MULT).
last_block_size = size % block_size;
last_block_size = last_block_size - last_block_size % BSHUF_BLOCKED_MULT;
if (last_block_size) {
count = fun(&C, last_block_size, elem_size);
if (count < 0) err = count;
cum_count += count;
}
if (err < 0) return err;
// Copy the final few elements that do not fill a BSHUF_BLOCKED_MULT group.
leftover_bytes = size % BSHUF_BLOCKED_MULT * elem_size;
last_in = (char *) ioc_get_in(&C, &this_iter);
ioc_set_next_in(&C, &this_iter, (void *) (last_in + leftover_bytes));
last_out = (char *) ioc_get_out(&C, &this_iter);
ioc_set_next_out(&C, &this_iter, (void *) (last_out + leftover_bytes));
memcpy(last_out, last_in, leftover_bytes);
ioc_destroy(&C);
return cum_count + leftover_bytes;
}
/* Bitshuffle a single block. */
/* Bitshuffle a single block: claim the next input/output slots from the
 * chain, advance both cursors by this block's byte length, then run the
 * bit-transpose.  Returns the kernel's count (negative on error). */
int64_t bshuf_bitshuffle_block(ioc_chain *C_ptr, \
        const size_t size, const size_t elem_size) {
    size_t slot;
    const size_t nbytes = size * elem_size;
    const void *src;
    void *dst;

    src = ioc_get_in(C_ptr, &slot);
    ioc_set_next_in(C_ptr, &slot, (void *) ((char *) src + nbytes));
    dst = ioc_get_out(C_ptr, &slot);
    ioc_set_next_out(C_ptr, &slot, (void *) ((char *) dst + nbytes));

    return bshuf_trans_bit_elem(src, dst, size, elem_size);
}
/* Bitunshuffle a single block. */
/* Bitunshuffle a single block: claim the next input/output slots from the
 * chain, advance both cursors by this block's byte length, then run the
 * inverse bit-transpose.  Returns the kernel's count (negative on error). */
int64_t bshuf_bitunshuffle_block(ioc_chain* C_ptr, \
        const size_t size, const size_t elem_size) {
    size_t slot;
    const size_t nbytes = size * elem_size;
    const void *src;
    void *dst;

    src = ioc_get_in(C_ptr, &slot);
    ioc_set_next_in(C_ptr, &slot, (void *) ((char *) src + nbytes));
    dst = ioc_get_out(C_ptr, &slot);
    ioc_set_next_out(C_ptr, &slot, (void *) ((char *) dst + nbytes));

    return bshuf_untrans_bit_elem(src, dst, size, elem_size);
}
/* Write a 64 bit unsigned integer to a buffer in big endian order. */
/* Write a 64 bit unsigned integer to a buffer in big endian order.
 * `buf` must point to at least 8 writable bytes. */
void bshuf_write_uint64_BE(void* buf, uint64_t num) {
    uint8_t* bytes = (uint8_t*) buf;
    int shift;
    int pos = 0;
    /* Emit the most significant byte first. */
    for (shift = 56; shift >= 0; shift -= 8) {
        bytes[pos++] = (uint8_t) (num >> shift);
    }
}
/* Read a 64 bit unsigned integer from a buffer big endian order. */
/* Read a 64 bit unsigned integer from a buffer big endian order.
 * `buf` must point to at least 8 readable bytes. */
uint64_t bshuf_read_uint64_BE(void* buf) {
    uint8_t* bytes = (uint8_t*) buf;
    uint64_t value = 0;
    int pos;
    /* Fold bytes in most-significant-first order. */
    for (pos = 0; pos < 8; pos++) {
        value = (value << 8) | bytes[pos];
    }
    return value;
}
/* Write a 32 bit unsigned integer to a buffer in big endian order. */
/* Write a 32 bit unsigned integer to a buffer in big endian order.
 * `buf` must point to at least 4 writable bytes. */
void bshuf_write_uint32_BE(void* buf, uint32_t num) {
    uint8_t* bytes = (uint8_t*) buf;
    int shift;
    int pos = 0;
    /* Emit the most significant byte first. */
    for (shift = 24; shift >= 0; shift -= 8) {
        bytes[pos++] = (uint8_t) (num >> shift);
    }
}
/* Read a 32 bit unsigned integer from a buffer big endian order. */
/* Read a 32 bit unsigned integer from a buffer big endian order.
 *
 * buf: pointer to at least 4 readable bytes; never modified.
 * Returns the decoded value.
 *
 * Fix: the original cast to (uint8_t*) silently discarded the const
 * qualifier of the input pointer; the cast now preserves const. */
uint32_t bshuf_read_uint32_BE(const void* buf) {
    const uint8_t* b = (const uint8_t*) buf;
    uint32_t num = 0;
    int ii;
    /* Fold bytes in most-significant-first order. */
    for (ii = 0; ii < 4; ii++) {
        num = (num << 8) | b[ii];
    }
    return num;
}
/* ---- Public functions ----
*
* See header file for description and usage.
*
*/
/* Recommended block size (in elements) for the given element size.
 * Targets a fixed byte budget per block, rounded down to the required
 * element multiple, with a lower bound. */
size_t bshuf_default_block_size(const size_t elem_size) {
    // This function needs to be absolutely stable between versions.
    // Otherwise encoded data will not be decodable.

    size_t block_size = BSHUF_TARGET_BLOCK_SIZE_B / elem_size;
    // Ensure it is a required multiple.
    block_size = (block_size / BSHUF_BLOCKED_MULT) * BSHUF_BLOCKED_MULT;
    return MAX(block_size, BSHUF_MIN_RECOMMEND_BLOCK);
}
/* Bitshuffle the whole `in` buffer into `out`, blocking the work (and
 * parallelizing it when OpenMP is enabled) through bshuf_blocked_wrap_fun.
 * A block_size of 0 selects the library default. */
int64_t bshuf_bitshuffle(const void* in, void* out, const size_t size,
        const size_t elem_size, size_t block_size) {
    return bshuf_blocked_wrap_fun(bshuf_bitshuffle_block, in, out,
                                  size, elem_size, block_size);
}
/* Bitunshuffle the whole `in` buffer into `out`, blocking the work (and
 * parallelizing it when OpenMP is enabled) through bshuf_blocked_wrap_fun.
 * A block_size of 0 selects the library default; it must match the value
 * used when the data was shuffled. */
int64_t bshuf_bitunshuffle(const void* in, void* out, const size_t size,
        const size_t elem_size, size_t block_size) {
    return bshuf_blocked_wrap_fun(bshuf_bitunshuffle_block, in, out,
                                  size, elem_size, block_size);
}
#undef TRANS_BIT_8X8
#undef TRANS_ELEM_TYPE
#undef MAX
#undef CHECK_MULT_EIGHT
#undef CHECK_ERR_FREE
#undef USESSE2
#undef USEAVX2
|
mkl_util.h | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#ifdef INTEL_MKL
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#if defined(INTEL_MKL_ML_ONLY) || defined(INTEL_MKL_DNN_ONLY)
#ifndef INTEL_MKL
#error "INTEL_MKL_{ML,DNN}_ONLY require INTEL_MKL"
#endif
#endif
#if defined(INTEL_MKL_ML_ONLY) && defined(INTEL_MKL_DNN_ONLY)
#error "at most one of INTEL_MKL_ML_ONLY and INTEL_MKL_DNN_ONLY may be defined"
#endif
#ifdef INTEL_MKL_ML_ONLY
#error \
"Compiling for INTEL MKL ML only is no longer supported. Please use MKL DNN (the default option for --config=mkl)"
#endif
#ifdef INTEL_MKL_ML_ONLY
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#include "mkl_service.h"
#include "mkl_trans.h"
#endif
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
#include "tensorflow/core/lib/core/stringpiece.h"
using mkldnn::engine;
using mkldnn::memory;
using mkldnn::padding_kind;
using mkldnn::primitive;
using mkldnn::reorder;
#endif
#ifdef _WIN32
typedef unsigned int uint;
#endif
namespace tensorflow {
// The file contains a number of utility classes and functions used by MKL
// enabled kernels
// This class encapsulates all the meta data that is associated with an MKL
// tensor. A tensor is an MKL tensor if it was created as the result of an
// MKL operation, and did not go through a conversion to a standard
// Tensorflow tensor.
// For use with MKL ML, has been deprecated
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims;

// The dimensions order that MKL DNN internally uses for 2D activations
// [Batch, Channel, Height, Width] and
// for 2D filters [Out_Channel, In_Channel, Height, Width].
typedef enum {
  Dim_N = 0,
  Dim_C = 1,
  Dim_H = 2,
  Dim_W = 3,
  Dim_O = 0,  // same index as Dim_N: filters reuse the activation layout
  Dim_I = 1   // same index as Dim_C
} MklDnnDims;

// The dimensions order that MKL DNN internally uses for 3D activations
// [Batch, Channel, Depth, Height, Width] and
// for 3D filters [Out_Channel, In_Channel, Depth, Height, Width].
typedef enum {
  Dim3d_N = 0,
  Dim3d_C = 1,
  Dim3d_D = 2,
  Dim3d_H = 3,
  Dim3d_W = 4,
  Dim3d_O = 0,  // same index as Dim3d_N
  Dim3d_I = 1   // same index as Dim3d_C
} MklDnnDims3D;

// Enum for the order of dimensions of a TF 2D filter with shape
// [filter_height, filter_width, in_channels, out_channels]
typedef enum {
  TF_2DFILTER_DIM_H = 0,
  TF_2DFILTER_DIM_W = 1,
  TF_2DFILTER_DIM_I = 2,
  TF_2DFILTER_DIM_O = 3
} TFFilterDims2d;

// Enum for the order of dimensions of a TF 3D filter with shape
// [filter_depth, filter_height, filter_width, in_channels, out_channels]
typedef enum {
  TF_3DFILTER_DIM_P = 0,
  TF_3DFILTER_DIM_H = 1,
  TF_3DFILTER_DIM_W = 2,
  TF_3DFILTER_DIM_I = 3,
  TF_3DFILTER_DIM_O = 4
} TFFilterDims3d;

// The dimensions order that MKL DNN requires for the filter in a grouped
// convolution (2D only)
typedef enum {
  MKL_GROUP_FILTER_DIM_G = 0,
  MKL_GROUP_FILTER_DIM_O = 1,
  MKL_GROUP_FILTER_DIM_I = 2,
  MKL_GROUP_FILTER_DIM_H = 3,
  MKL_GROUP_FILTER_DIM_W = 4
} MklDnnFilterGroupDims;

// Enum used to templatize MklOp kernel implementations
// that support both fp32 and int8 versions.
enum class MklQuantization {
  QUANTIZED_VERSION,
  FP_VERSION,
};

// Threshold below which a batch is considered "small".
// NOTE(review): the consumers of this constant are not visible in this file;
// confirm intended usage before changing the value.
static const int kSmallBatchSize = 32;
#ifdef INTEL_MKL_ML_ONLY
// Metadata for a deprecated MKL-ML tensor: the MKL and TF layout handles,
// per-dimension sizes/strides, and the TF->MKL dimension-order map, plus
// (de)serialization into the uint8 metadata tensor that accompanies each
// MKL tensor.  Owns the arrays and layout handles it creates.
class MklShape {
 public:
  MklShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklShape);  // Cannot copy

  // Releases owned arrays and any MKL layout handles that were created.
  ~MklShape() {
    if (sizes_) delete[] sizes_;
    if (strides_) delete[] strides_;
    if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);
    if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);
    if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
  }

  const bool IsMklTensor() const { return isMklTensor_; }
  void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }
  void SetDimensions(const size_t dimension) { dimension_ = dimension; }

  // Takes ownership of an already-created MKL layout handle.
  void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }
  // Derives the MKL layout from one of a primitive's resource slots.
  void SetMklLayout(const void* primitive, size_t resourceType) {
    CHECK_EQ(
        dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
                                         (dnnResourceType_t)resourceType),
        E_SUCCESS);
  }

  // Records the TF-side sizes/strides and creates the matching MKL layout
  // handle for them.
  void SetTfLayout(const size_t dimension, const size_t* sizes,
                   const size_t* strides) {
    dimension_ = dimension;
    if (dimension > 0) {  // MKl doesn't support zero dimension tensors
      sizes_ = new size_t[dimension];
      strides_ = new size_t[dimension];

      for (int ii = 0; ii < dimension; ii++) {
        sizes_[ii] = sizes[ii];
        strides_[ii] = strides[ii];
      }
      CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
               E_SUCCESS);
    }
  }

  // Default case - MKL dim ordering is opposite of TF dim ordering
  // MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
  // TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
  // For layers that rely on data_format semantics (conv, pooling etc.)
  // or operate only on certain dimensions (relu, concat, split etc.),
  // Mkl APIs might require us to reorder these dimensions. In such cases,
  // kernels should explicitly set this map
  void SetTfDimOrder(const size_t dimension) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
    }
  }

  // Copies a caller-provided TF->MKL dimension map.
  void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
    }
  }

  // Builds the TF->MKL map from a TensorFormat; 4-D only.
  void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    CHECK_EQ(dimension, 4);
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
  }

  const dnnLayout_t GetMklLayout() const { return mklLayout_; }
  const dnnLayout_t GetTfLayout() const { return tfLayout_; }
  // Layout currently describing the stored data: MKL layout for MKL tensors,
  // TF layout otherwise.
  const dnnLayout_t GetCurLayout() const {
    return isMklTensor_ ? mklLayout_ : tfLayout_;
  }
  size_t GetDimension() const { return dimension_; }
  const size_t* GetSizes() const { return sizes_; }
  // Dimension size in MKL order.
  int64 dim_size(int index) const { return sizes_[index]; }
  // Dimension size in TF order (translated through the TF->MKL map).
  int64 tf_dim_size(int index) const {
    return sizes_[tf_to_mkl_dim_map_[index]];
  }
  const size_t* GetStrides() const { return strides_; }
  const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
  size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Channel dimension.
  bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Batch dimension.
  bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Width dimension.
  bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Height dimension.
  bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NCHW format.
  bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NHWC format.
  bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Converts the flat data in `input` (in the current layout) into `output`
  // laid out per `targetLayout`, via a one-shot MKL conversion primitive.
  void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
                            void* output) const {
    dnnLayout_t curLayout;
    if (isMklTensor_)
      curLayout = mklLayout_;
    else
      curLayout = tfLayout_;
    dnnPrimitive_t convert;
    CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout),
             E_SUCCESS);
    CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS);
    CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS);
  }

  // The following methods are used for serializing and de-serializing the
  // contents of the mklshape object.
  // The data is serialized in this order
  // isMklTensor_
  // dimension_
  // sizes_
  // strides_
  // mklLayout_
  // tfLayout_
  // tf_to_mkl_dim_map_

#define SIZE_OF_MKL_DNN_BUF \
  (dnnLayoutSerializationBufferSize_F32())  // Size of buffer needed to
                                            // serialize dnn_layout pointer

// Size of buffer to hold the serialized object, the size is computed as
// follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) +
// sizeof(strides_)
// + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer)
// + sizeof(tf_to_mkl_dim_map_)
#define SIZE_OF_MKL_SERIAL_DATA(dims) \
  (2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)

// First we need to define some macro for offsets into the serial buffer where
// different elements of Mklshape is written/read from

#define IS_MKL_TENSOR_OFFSET 0
// Location from start of buffer where isMklTensor_ is serialized
#define DIMS_OFFSET \
  (IS_MKL_TENSOR_OFFSET + sizeof(size_t))  // Location of dimension_
// Location of sizes. Note dim is not used here, left here
// to make macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))
#define STRIDES_OFFSET(dims) \
  (SIZES_OFFSET(dims) + dims * sizeof(size_t))  // Location of strides
#define MKL_LAYOUT_OFFSET(dims) \
  (STRIDES_OFFSET(dims) + dims * sizeof(size_t))  // Location of mklLayout_
#define TF_LAYOUT_OFFSET(dims) \
  (MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)  // Location of tfLayout_
// Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
  (TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)

  // TODO(agramesh1) make sure to create a const to share with rewrite pass
  // for min size of MKL metadata tensor.

  // Rebuilds this object from the serialized byte layout described above.
  void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
    CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
    // Make sure buffer holds at least isMklTensor_
    isMklTensor_ =
        *reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;

    if (isMklTensor_) {  // If it is an MKL Tensor then read the rest
      dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
      CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
          << "Bufsize too small in DeSerialize";
      sizes_ = new size_t[dimension_];
      strides_ = new size_t[dimension_];
      tf_to_mkl_dim_map_ = new size_t[dimension_];
      for (int i = 0; i < dimension_; i++) {
        sizes_[i] =
            reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
        strides_[i] = reinterpret_cast<const size_t*>(
            buf + STRIDES_OFFSET(dimension_))[i];
        tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
            buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
      }
      CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
                                        buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
                                        buf + TF_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
    }
  }

  // Writes this object into `buf` using the serialized byte layout above.
  void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
        << "Bufsize too small to Serialize";
    *reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
        isMklTensor_ ? 1 : 0;
    if (isMklTensor_) {
      *(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
      for (int i = 0; i < dimension_; i++) {
        reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
            sizes_[i];
        reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
            strides_[i];
        reinterpret_cast<size_t*>(buf +
                                  TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
            tf_to_mkl_dim_map_[i];
      }
      CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
                                      buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(
          dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
          E_SUCCESS);
    }
  }

 private:
  bool isMklTensor_ =
      false;  // Flag to indicate if the tensor is an MKL tensor or not
  dnnLayout_t mklLayout_ = nullptr;  // Pointer to the MKL layout
  dnnLayout_t tfLayout_ = nullptr;   // Pointer to layout of corresponding
  // Tensorflow tensor, used when conversion from MKL to standard tensor
  size_t dimension_ = 0;
  size_t* sizes_ = nullptr;    // Required by MKL for conversions
  size_t* strides_ = nullptr;  // Required by MKL for conversions
  size_t* tf_to_mkl_dim_map_ =
      nullptr;  // TF dimension corresponding to this MKL dimension
};
#else
// Forward decl
TensorFormat MklDnn3DDataFormatToTFDataFormat(memory::format format);
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype);
// Shape/layout metadata for an MKL-DNN tensor: per-dimension sizes, the
// TF->MKL dimension map, the TF data format, element type, and the raw MKL
// memory descriptor.  The payload (MklShapeData) is a plain struct so the
// whole object can be (de)serialized with a single copy.
class MklDnnShape {
 private:
  typedef struct {
    /// Flag to indicate if the tensor is an MKL tensor or not
    bool is_mkl_tensor_ = false;
    /// Number of dimensions in Tensorflow format
    size_t dimension_ = 0;
    /// Required by MKLDNN for conversions
    mkldnn_dims_t sizes_;  // Required by MKL for conversions
    memory::format tf_data_format_ = memory::format::format_undef;
    memory::data_type T_ = memory::data_type::data_undef;
    // MKL layout
    mkldnn_memory_desc_t mkl_md_;
    /// TF dimension corresponding to this MKL dimension
    mkldnn_dims_t map_;
  } MklShapeData;
  MklShapeData data_;

  typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t;

// Sentinel marking an unused slot in sizes_/map_.
#define INVALID_DIM_SIZE -1

 public:
  // Marks every size/map slot invalid so unused entries are recognizable.
  MklDnnShape() {
    for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
         ++i) {
      data_.sizes_[i] = -1;
    }
    for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
      data_.map_[i] = -1;
    }
  }

  ~MklDnnShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape);  // Cannot copy

  /// Helper function to compare memory::desc objects for MklDnn.
  /// May be this should go into MklDnn directly.
  /// NOTE(review): this is a byte-wise comparison of the underlying C
  /// structs; padding bytes could in principle differ between otherwise
  /// equal descriptors -- confirm this is safe for all mkldnn versions used.
  inline bool CompareMklDnnLayouts(const memory::desc& md1,
                                   const memory::desc& md2) const {
    mkldnn_memory_desc_t mdd1 = md1.data;
    mkldnn_memory_desc_t mdd2 = md2.data;
    const char* d1 = reinterpret_cast<const char*>(&mdd1);
    const char* d2 = reinterpret_cast<const char*>(&mdd2);

    size_t md_size = sizeof(mdd1);
    for (size_t i = 0; i < md_size; i++) {
      if (*d1++ != *d2++) {
        return false;
      }
    }
    return true;
  }

  /// Equality function for MklDnnShape objects
  /// @return true if both are equal; false otherwise.
  inline bool operator==(const MklDnnShape& input_shape) const {
    if (this->IsMklTensor() != input_shape.IsMklTensor()) {
      return false;
    }

    // If input tensors are in Mkl layout, then we check for dimensions and
    // sizes.
    if (this->IsMklTensor()) {
      return this->GetTfShape() == input_shape.GetTfShape() &&
             CompareMklDnnLayouts(this->GetMklLayout(),
                                  input_shape.GetMklLayout());
    }

    // Both are TF tensors; there is no layout metadata to compare.
    return true;
  }

  /// Equality operator for MklDnnShape and TFShape.
  /// Returns: true if TF shapes for both are the same, false otherwise
  inline bool operator==(const TensorShape& input_shape) const {
    if (!this->IsMklTensor()) {
      return false;
    }
    return this->GetTfShape() == input_shape;
  }

  inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }
  inline void SetMklTensor(bool is_mkl_tensor) {
    data_.is_mkl_tensor_ = is_mkl_tensor;
  }

  inline void SetDimensions(const size_t dimension) {
    data_.dimension_ = dimension;
  }

  // Size of the dimension named 'N'/'C'/'H'/'W' (2D activation order).
  inline size_t GetDimension(char dimension) const {
    int index = GetMklDnnTensorDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // Size of the dimension named 'N'/'C'/'D'/'H'/'W' (3D activation order).
  inline size_t GetDimension3D(char dimension) const {
    int index = GetMklDnnTensor3DDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // Maps a 2D dimension letter to its MklDnnDims index; fatal on others.
  inline int32 GetMklDnnTensorDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims::Dim_N;
      case 'C':
        return MklDnnDims::Dim_C;
      case 'H':
        return MklDnnDims::Dim_H;
      case 'W':
        return MklDnnDims::Dim_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  // Maps a 3D dimension letter to its MklDnnDims3D index; fatal on others.
  inline int32 GetMklDnnTensor3DDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims3D::Dim3d_N;
      case 'C':
        return MklDnnDims3D::Dim3d_C;
      case 'D':
        return MklDnnDims3D::Dim3d_D;
      case 'H':
        return MklDnnDims3D::Dim3d_H;
      case 'W':
        return MklDnnDims3D::Dim3d_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  inline size_t GetDimension() const { return data_.dimension_; }
  inline const int* GetSizes() const {
    return reinterpret_cast<const int*>(&data_.sizes_[0]);
  }

  // Returns an mkldnn::memory::dims object that contains the sizes of this
  // MklDnnShape object.
  inline memory::dims GetSizesAsMklDnnDims() const {
    memory::dims retVal;
    if (data_.is_mkl_tensor_) {
      size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
      for (size_t i = 0; i < dimensions; i++) {
        if (data_.sizes_[i] != INVALID_DIM_SIZE)
          retVal.push_back(data_.sizes_[i]);
      }
    } else {
      // Calling this on a non-MKL tensor is a programming error.
      CHECK_EQ(data_.is_mkl_tensor_, true);
    }
    return retVal;
  }

  inline int64 DimSize(int index) const {
    CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
    return data_.sizes_[index];
  }

  /// Return TensorShape that describes the Tensorflow shape of the tensor
  /// represented by this MklShape.
  inline TensorShape GetTfShape() const {
    CHECK_EQ(data_.is_mkl_tensor_, true);

    std::vector<int32> shape(data_.dimension_, -1);
    if (data_.tf_data_format_ != memory::format::blocked) {
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[TfDimIdx(idx)];
      }
    } else {
      // If Tensorflow shape is in Blocked format, then we don't have dimension
      // map for it. So we just create Tensorflow shape from sizes in the
      // specified order.
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[idx];
      }
    }

    TensorShape ts;
    bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
    CHECK_EQ(ret, true);
    return ts;
  }

  inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
  inline const memory::data_type GetElemType() { return data_.T_; }

  // Records the MKL layout from a primitive descriptor.
  inline void SetMklLayout(memory::primitive_desc* pd) {
    CHECK_NOTNULL(pd);
    data_.mkl_md_ = pd->desc().data;
  }

  // Records the MKL layout from a memory descriptor.
  inline void SetMklLayout(memory::desc* md) {
    CHECK_NOTNULL(md);
    data_.mkl_md_ = md->data;
  }

  inline const memory::desc GetMklLayout() const {
    return memory::desc(data_.mkl_md_);
  }

  inline memory::format GetTfDataFormat() const {
    return data_.tf_data_format_;
  }

  /// We don't create primitive_descriptor for TensorFlow layout now.
  /// We use lazy evaluation and create it only when needed. Input format can
  /// also be Blocked format.
  inline void SetTfLayout(size_t dims, const memory::dims& sizes,
                          memory::format format) {
    CHECK_EQ(dims, sizes.size());
    data_.dimension_ = dims;
    for (size_t ii = 0; ii < dims; ii++) {
      data_.sizes_[ii] = sizes[ii];
    }
    data_.tf_data_format_ = format;
    if (format != memory::format::blocked) {
      // Named formats also fix the TF->MKL dimension map.
      SetTfDimOrder(dims, format);
    }
  }

  // Builds (lazily) a memory descriptor for the TF-side layout.
  inline const memory::desc GetTfLayout() const {
    memory::dims dims;
    for (size_t ii = 0; ii < data_.dimension_; ii++) {
      dims.push_back(data_.sizes_[ii]);
    }

    // Create Blocked memory desc if input TF format was set like that.
    if (data_.tf_data_format_ == memory::format::blocked) {
      auto strides = CalculateTFStrides(dims);
      return CreateBlockedMemDescHelper(dims, strides, data_.T_);
    } else {
      return memory::desc(dims, data_.T_, data_.tf_data_format_);
    }
  }

  // Descriptor of the layout the data is currently stored in.
  inline const memory::desc GetCurLayout() const {
    return IsMklTensor() ? GetMklLayout() : GetTfLayout();
  }

  // nhasabni - I've removed SetTfDimOrder that was setting default order in
  // case of MKL-ML. We don't need a case of default dimension order because
  // when an operator that does not get data_format attribute gets all inputs
  // in Tensorflow format, it will produce output in Tensorflow format.
  inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) {
    CHECK(dimension == data_.dimension_);
    for (size_t ii = 0; ii < dimension; ii++) {
      data_.map_[ii] = map[ii];
    }
  }

  // Builds the TF->MKL map from a TensorFormat; supports 4-D and 5-D.
  inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    if (dimension == 5) {
      CHECK(dimension == data_.dimension_);
      data_.map_[GetTensorDimIndex<3>(data_format, '0')] =
          MklDnnDims3D::Dim3d_D;
      data_.map_[GetTensorDimIndex<3>(data_format, '1')] =
          MklDnnDims3D::Dim3d_H;
      data_.map_[GetTensorDimIndex<3>(data_format, '2')] =
          MklDnnDims3D::Dim3d_W;
      data_.map_[GetTensorDimIndex<3>(data_format, 'C')] =
          MklDnnDims3D::Dim3d_C;
      data_.map_[GetTensorDimIndex<3>(data_format, 'N')] =
          MklDnnDims3D::Dim3d_N;
    } else {
      CHECK_EQ(dimension, 4);
      CHECK(dimension == data_.dimension_);
      data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
      data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
      data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
      data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
    }
  }

  inline void SetTfDimOrder(const size_t dimension, memory::format format) {
    TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
    SetTfDimOrder(dimension, data_format);
  }

  inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; }
  inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
  inline int64 TfDimSize(int index) const {
    return data_.sizes_[TfDimIdx(index)];
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Channel dimension.
  inline bool IsMklChannelDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_C;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Batch dimension.
  inline bool IsMklBatchDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_N;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Width dimension.
  inline bool IsMklWidthDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_W;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Height dimension.
  inline bool IsMklHeightDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_H;
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NCHW format.
  inline bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NHWC format.
  inline bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// The following methods are used for serializing and de-serializing the
  /// contents of the mklshape object.
  /// The data is serialized in this order
  /// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_;

  /// Size of buffer to hold the serialized object, the size is computed by
  /// following above mentioned order
  inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }

  // Serializes by copying the raw MklShapeData struct into `buf`.
  void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= GetSerializeBufferSize())
        << "Buffer size is too small to SerializeMklDnnShape";
    *reinterpret_cast<MklShapeData*>(buf) = data_;
  }

  // Rebuilds this object by copying the raw MklShapeData struct out of `buf`.
  void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
    // Make sure buffer holds at least is_mkl_tensor_.
    CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
        << "Buffer size is too small in DeSerializeMklDnnShape";

    const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
    if (is_mkl_tensor) {  // If it is an MKL Tensor then read the rest
      CHECK(buf_size >= GetSerializeBufferSize())
          << "Buffer size is too small in DeSerializeMklDnnShape";
      data_ = *reinterpret_cast<const MklShapeData*>(buf);
    }
  }
};
#endif
// List of MklShape objects. Used in Concat/Split layers.
#ifndef INTEL_MKL_ML_ONLY
typedef std::vector<MklDnnShape> MklDnnShapeList;
#else
typedef std::vector<MklShape> MklShapeList;
#endif
#ifdef INTEL_MKL_ML_ONLY
// Check if all tensors specified by MklShapes are MKL tensors.
// Check if all tensors specified by MklShapes are MKL tensors.
// Returns false as soon as the first non-MKL shape is encountered.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
  for (const auto& shape : shapes) {
    if (!shape.IsMklTensor()) return false;
  }
  return true;
}
// MKL-ML variant: materializes `mkl_tensor` into a freshly allocated
// TF-ordered temp tensor, using the shape/layout metadata in `mkl_shape`.
// Returns the converted tensor.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;

  for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
    // Outermost to innermost dimension
    output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
  }

  // Allocate output tensor.
  // NOTE(review): the returned Status is ignored; a failed allocation would
  // leave output_tensor empty -- confirm callers tolerate that.
  context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor);

  dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout());
  void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data());
  void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data());

  if (mkl_tensor.NumElements() != 0) {
    mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer);
  }
  return output_tensor;
}
#else
using mkldnn::stream;
template <typename T>
class MklDnnData;
// Converts an MKL-layout tensor back to a plain TensorFlow-layout tensor.
//
// If `mkl_tensor` is already a TF tensor (per `mkl_shape`), it is returned
// unchanged. Otherwise a temp tensor with the TF shape is allocated and the
// data is reordered into it, or copied directly when the layouts already
// match. Any mkldnn exception is fatal.
//
// Fix: removed a stray empty statement (a lone `;`) left after the
// TensorShape initialization.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklDnnShape& mkl_shape) {
  Tensor output_tensor;
  try {
    if (!mkl_shape.IsMklTensor())
      return mkl_tensor;  // return input since it is already TF tensor

    TensorShape output_shape = mkl_shape.GetTfShape();

    // Allocate output tensor.
    // NOTE(review): the returned Status is ignored; a failed allocation
    // would leave output_tensor empty -- confirm callers tolerate that.
    context->allocate_temp(DataTypeToEnum<T>::v(), output_shape,
                           &output_tensor);

    auto cpu_engine = engine(engine::cpu, 0);
    MklDnnData<T> input(&cpu_engine);

    // Get Mkl layout of input tensor.
    auto input_mkl_md = mkl_shape.GetMklLayout();
    auto output_tf_md = mkl_shape.GetTfLayout();
    auto output_tf_pd = memory::primitive_desc(output_tf_md, cpu_engine);
    input.SetUsrMem(input_mkl_md, &mkl_tensor);

    // Reorder from the MKL layout into the TF layout when they differ.
    if (input.IsReorderNeeded(output_tf_pd)) {
      std::vector<primitive> net;
      CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, &output_tensor, &net),
               true);
      stream(stream::kind::eager).submit(net).wait();
    } else {
      // If not, just forward input tensor to output tensor.
      CHECK(output_tensor.CopyFrom(mkl_tensor, output_shape));
    }
  } catch (mkldnn::error& e) {
    string error_msg = "Status: " + std::to_string(e.status) +
                       ", message: " + string(e.message) + ", in file " +
                       string(__FILE__) + ":" + std::to_string(__LINE__);
    LOG(FATAL) << "Operation received an exception: " << error_msg;
  }
  return output_tensor;
}
#endif
// Get the MKL shape from the second string tensor
#ifdef INTEL_MKL_ML_ONLY
// Deserializes the MKL shape for logical input 'n' from its paired
// meta-data input tensor into *mklshape.
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
  const Tensor& meta =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklShape(meta.flat<uint8>().data(),
                                meta.flat<uint8>().size() * sizeof(uint8));
}
#else
// Deserializes the MKL-DNN shape for logical input 'n' from its paired
// meta-data input tensor into *mklshape.
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
  const Tensor& meta =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklDnnShape(meta.flat<uint8>().data(),
                                   meta.flat<uint8>().size() * sizeof(uint8));
}
#endif
// Returns the data (non-meta) tensor for logical input 'n'.
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
  const int data_idx = GetTensorDataIndex(n, ctext->num_inputs());
  return ctext->input(data_idx);
}
// Fetches the input list registered under 'name' into *input_tensors.
// NOTE(review): the Status returned by input_list() is discarded here;
// on failure *input_tensors is left as input_list() leaves it.
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
                            OpInputList* input_tensors) {
  CHECK_NOTNULL(input_tensors);
  ctext->input_list(name, input_tensors);
}
#ifdef INTEL_MKL_ML_ONLY
// Deserializes the MKL shapes for the input list 'name' (meta inputs are
// registered under "mkl_<name>") into the pre-sized *mkl_shapes.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklShapeList* mkl_shapes) {
  OpInputList meta_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &meta_tensors);
  for (int i = 0; i < meta_tensors.size(); ++i) {
    auto bytes = meta_tensors[i].flat<uint8>();
    (*mkl_shapes)[i].DeSerializeMklShape(bytes.data(),
                                         bytes.size() * sizeof(uint8));
  }
}
#else
// Deserializes the MKL-DNN shapes for the input list 'name' (meta inputs
// are registered under "mkl_<name>") into the pre-sized *mkl_shapes.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklDnnShapeList* mkl_shapes) {
  OpInputList meta_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &meta_tensors);
  for (int i = 0; i < meta_tensors.size(); ++i) {
    auto bytes = meta_tensors[i].flat<uint8>();
    (*mkl_shapes)[i].DeSerializeMklDnnShape(bytes.data(),
                                            bytes.size() * sizeof(uint8));
  }
}
#endif
#ifndef INTEL_MKL_ML_ONLY
/// Returns the TensorShape of input 'input_idx'. If the input carries an
/// MKL layout, the TF shape is recovered from its MklDnnShape metadata;
/// otherwise the tensor's own shape is used.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
  // Sanity checks on the caller's arguments.
  CHECK_NOTNULL(context);
  CHECK_LT(input_idx, context->num_inputs());

  MklDnnShape mkl_shape;
  GetMklShape(context, input_idx, &mkl_shape);
  if (mkl_shape.IsMklTensor()) return mkl_shape.GetTfShape();

  // Plain TF tensor: its own shape is authoritative.
  return MklGetInput(context, input_idx).shape();
}
#endif
#ifdef INTEL_MKL_ML_ONLY
// Allocates the meta-data output tensor paired with logical output 'n' and
// writes the serialized MKL shape into it.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklShape& mkl_shape) {
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());
  TensorShape meta_shape;
  meta_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocates the meta-data output tensor paired with logical output 'n' and
// writes the serialized MKL-DNN shape into it.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklDnnShape& mkl_shape) {
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());
  TensorShape meta_shape;
  meta_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklDnnShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
#ifdef INTEL_MKL_ML_ONLY
// Allocates both the data output (shape tf_shape, returned via *output) and
// its paired meta-data output holding the serialized MKL shape.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklShape& mkl_shape) {
  const int data_idx = GetTensorDataIndex(n, ctext->num_outputs());
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());

  OP_REQUIRES_OK(ctext, ctext->allocate_output(data_idx, tf_shape, output));

  TensorShape meta_shape;
  meta_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocates both the data output (shape tf_shape, returned via *output) and
// its paired meta-data output holding the serialized MKL-DNN shape.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklDnnShape& mkl_shape) {
  const int data_idx = GetTensorDataIndex(n, ctext->num_outputs());
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());

  OP_REQUIRES_OK(ctext, ctext->allocate_output(data_idx, tf_shape, output));

  TensorShape meta_shape;
  meta_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklDnnShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
// Allocates a temp tensor and returns the data buffer for temporary storage.
#ifndef INTEL_MKL_ML_ONLY
// Allocates a temp tensor large enough to back the given memory primitive
// descriptor and returns its raw buffer via *buf_out.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           const memory::primitive_desc& pd, void** buf_out) {
  // One extra element covers any remainder of the integer division.
  const size_t num_elems = pd.get_size() / sizeof(T) + 1;
  TensorShape buf_shape;
  buf_shape.AddDim(num_elems);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 buf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#else
// Allocates a float temp tensor large enough to back the given MKL layout
// and returns its raw buffer via *buf_out.
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           dnnLayout_t lt_buff, void** buf_out) {
  // One extra element covers any remainder of the integer division.
  const size_t num_elems =
      dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) /
          sizeof(float) +
      1;
  TensorShape buf_shape;
  buf_shape.AddDim(num_elems);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
                                                 buf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
#endif
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
TensorShape tf_shape) {
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
tf_shape, tensor_out));
}
// Fills strides[0..3] for a 4-D tensor given its per-dimension sizes.
// NOTE(review): the index meaning of sizes[]/strides[] is MKL's internal
// dimension order (see MklDims), not TF order — confirm against MklShape
// before reusing elsewhere. strides are expressed in elements.
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
                                const size_t* sizes) {
  // MKL requires strides in NCHW
  if (data_format == FORMAT_NHWC) {
    // NHWC layout: the dimension at index 2 is innermost (stride 1).
    strides[0] = sizes[2];
    strides[1] = sizes[0] * sizes[2];
    strides[2] = 1;
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  } else {
    // NCHW layout: the dimension at index 0 is innermost (stride 1).
    strides[0] = 1;
    strides[1] = sizes[0];
    strides[2] = sizes[0] * sizes[1];
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  }
}
#ifdef INTEL_MKL_ML_ONLY
// Converts a 4-D MklShape's sizes into a TensorShape in the given TF data
// format. Fails the op if the MKL shape is not 4-dimensional.
// NOTE(review): the hard-coded indices assume MKL's internal dimension
// ordering of GetSizes() (see MklDims) — confirm before changing.
inline void MklSizesToTFSizes(OpKernelContext* context,
                              TensorFormat data_format_,
                              const MklShape& mkl_shape,
                              TensorShape* tf_shape) {
  size_t tf_dim = mkl_shape.GetDimension();
  const size_t* tf_sizes = mkl_shape.GetSizes();

  // Only 4-D shapes are supported by this conversion.
  OP_REQUIRES(context, tf_dim == 4,
              errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));
  std::vector<int32> sizes;

  sizes.push_back(tf_sizes[3]);  // batch dimension comes first in both formats

  if (data_format_ == FORMAT_NHWC) {
    // N, H, W, C from MKL indices 3, 1, 0, 2.
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
    sizes.push_back(tf_sizes[2]);
  } else {
    // N, C, H, W from MKL indices 3, 2, 1, 0.
    sizes.push_back(tf_sizes[2]);
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
  }

  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
#endif
// Maps a dimension character ('N', 'C', 'H', 'W') to its MklDims index.
// Any other character is a fatal error.
inline int32 GetMklTensorDimIndex(char dimension) {
  switch (dimension) {
    case 'N': return MklDims::N;
    case 'C': return MklDims::C;
    case 'H': return MklDims::H;
    case 'W': return MklDims::W;
    default:
      LOG(FATAL) << "Invalid dimension: " << dimension;
      return -1;  // Avoid compiler warning about missing return value
  }
}
#ifdef INTEL_MKL_ML_ONLY
// Returns the size of the named dimension ('N'/'C'/'H'/'W') of an MklShape,
// after validating that the mapped index is within the shape's rank.
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
  const int dim_index = GetMklTensorDimIndex(dimension);
  CHECK(dim_index >= 0 && dim_index < mkl_shape.GetDimension())
      << "Invalid index from the dimension: " << dim_index << ", " << dimension;
  return mkl_shape.dim_size(dim_index);
}
#endif
// Copies an MKL tensor pair (data + meta) from input slot idx_in to output
// slot idx_out without converting layouts.
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
                                 int idx_out) {
  const int num_in = context->num_inputs();
  const int num_out = context->num_outputs();
  const int data_in = GetTensorDataIndex(idx_in, num_in);
  const int meta_in = GetTensorMetaDataIndex(idx_in, num_in);
  const int data_out = GetTensorDataIndex(idx_out, num_out);
  const int meta_out = GetTensorMetaDataIndex(idx_out, num_out);

  const Tensor& data = context->input(data_in);
  const Tensor& meta = context->input(meta_in);

  // Shallow-copy both tensors into fresh Tensor objects sharing the buffers.
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  Tensor data_copy(data.dtype());
  Tensor meta_copy(meta.dtype());
  CHECK(data_copy.CopyFrom(data, data.shape()));
  CHECK(meta_copy.CopyFrom(meta, meta.shape()));

  context->set_output(data_out, data_copy);
  context->set_output(meta_out, meta_copy);
}
#ifdef INTEL_MKL_ML_ONLY
// Copies a plain-TF input tensor to the output slot under a new shape and
// emits a dummy (non-MKL) MklShape for the paired meta output.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());
  const Tensor& data = context->input(data_in);

  // The output is in TF format, so its MKL metadata is a dummy shape.
  MklShape meta_shape;
  meta_shape.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, meta_shape);

  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  Tensor reshaped(data.dtype());
  CHECK(reshaped.CopyFrom(data, shape));
  context->set_output(data_out, reshaped);
}
#else
// Copies a plain-TF input tensor to the output slot under a new shape and
// emits a dummy (non-MKL) MklDnnShape for the paired meta output.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());
  const Tensor& data = context->input(data_in);

  // The output is in TF format, so its MKL metadata is a dummy shape.
  MklDnnShape meta_shape;
  meta_shape.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, meta_shape);

  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  Tensor reshaped(data.dtype());
  CHECK(reshaped.CopyFrom(data, shape));
  context->set_output(data_out, reshaped);
}
#endif
#ifdef INTEL_MKL_ML_ONLY
// Forwards a plain-TF input tensor to the output slot (ref-aware) and emits
// a dummy (non-MKL) MklShape for the paired meta output.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  MklShape meta_shape;
  meta_shape.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, meta_shape);

  // Ref-typed inputs must be forwarded as refs; plain tensors by value.
  if (IsRefType(context->input_dtype(data_in))) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
  } else {
    context->set_output(data_out, context->input(data_in));
  }
}
#else
// Forwards a plain-TF input tensor to the output slot (ref-aware) and emits
// a dummy (non-MKL) MklDnnShape for the paired meta output.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  MklDnnShape meta_shape;
  meta_shape.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, meta_shape);

  // Ref-typed inputs must be forwarded as refs; plain tensors by value.
  if (IsRefType(context->input_dtype(data_in))) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
  } else {
    context->set_output(data_out, context->input(data_in));
  }
}
#endif
// Forwards both halves of an MKL tensor pair (data + meta) from input slot
// idx_in to output slot idx_out, preserving ref semantics.
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
                                    int idx_out) {
  const int num_in = context->num_inputs();
  const int num_out = context->num_outputs();
  const int data_in = GetTensorDataIndex(idx_in, num_in);
  const int meta_in = GetTensorMetaDataIndex(idx_in, num_in);
  const int data_out = GetTensorDataIndex(idx_out, num_out);
  const int meta_out = GetTensorMetaDataIndex(idx_out, num_out);

  // Ref-typed inputs must be forwarded as refs; plain tensors by value.
  if (IsRefType(context->input_dtype(data_in))) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
    context->forward_ref_input_to_ref_output(meta_in, meta_out);
  } else {
    context->set_output(data_out, context->input(data_in));
    context->set_output(meta_out, context->input(meta_in));
  }
}
#ifndef INTEL_MKL_ML_ONLY
// Emits a dummy (non-MKL) MklDnnShape meta output for slot idx_data_out;
// used when the corresponding data output is in TF format.
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
                                      uint32 idx_data_out) {
  MklDnnShape dummy;
  dummy.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, dummy);
}
// Forwards the data tensor from input slot idx_in to output slot idx_out
// (ref-aware) while emitting the caller-provided MklDnnShape as the paired
// meta output (instead of forwarding the input's meta tensor).
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
                                                int idx_in, int idx_out,
                                                const MklDnnShape& mkl_shape) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  AllocateOutputSetMklShape(context, idx_out, mkl_shape);

  // Ref-typed inputs must be forwarded as refs; plain tensors by value.
  if (IsRefType(context->input_dtype(data_in))) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
  } else {
    context->set_output(data_out, context->input(data_in));
  }
}
#endif
// Forwards ONLY the MKL meta-data tensor from input slot idx_data_in to
// output slot idx_data_out (used by elementwise ops that run the Eigen
// implementation and never touch the MKL shape).
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
                                      uint32 idx_data_in,
                                      uint32_t idx_data_out) {
  const uint32 meta_in =
      GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
  const uint32 meta_out =
      GetTensorMetaDataIndex(idx_data_out, context->num_outputs());

  // Ref-typed inputs must be forwarded as refs; plain tensors by value.
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(meta_in, meta_out);
  } else {
    context->set_output(meta_out, context->input(meta_in));
  }
}
#ifdef INTEL_MKL_ML_ONLY
// Emits a dummy (non-MKL) MklShape meta output for slot idx_data_out;
// used when the corresponding data output is in TF format.
inline void SetDummyMklShapeOutput(OpKernelContext* context,
                                   uint32 idx_data_out) {
  MklShape dummy;
  dummy.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, dummy);
}
// We don't need these functions in MKLDNN. We have defined equality operator
// on MklDnnShape class directly.
// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const MklShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const TensorShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->dims()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Returns true iff the TensorShape and the MKL shape's TF view agree in
// rank and every dimension size. Delegates to the (MklShape*, TensorShape*)
// overload with the arguments swapped.
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const MklShape* input_shape_1) {
  return MklCompareShapes(input_shape_1, input_shape_0);
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
const TensorShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->dims() != input_shape_1->dims()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->dims();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.
// Transposes a 4-D float tensor from NHWC to NCHW layout using MKL's
// out-of-place matrix copy, one batch slice at a time.
// NOTE(review): num_threads(16) is hard-coded; on machines with fewer or
// more cores this over/under-subscribes — consider confirming the intent.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  // Input dims are read from the NHWC source tensor.
  int64 N = input.dim_size(0);
  int64 H = input.dim_size(1);
  int64 W = input.dim_size(2);
  int64 C = input.dim_size(3);

  // Elements per batch image; each slice is transposed independently.
  int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    // Transpose an (H*W) x C row-major matrix into C x (H*W).
    mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
                  buf_out + n * stride_n, H * W);
  }
}
// Transposes a 4-D float tensor from NCHW to NHWC layout using MKL's
// out-of-place matrix copy, one batch slice at a time.
// NOTE(review): dims are read from the *output* tensor here (which is
// NHWC), unlike MklNHWCToNCHW which reads the input — both yield the same
// N/H/W/C values, but confirm if refactoring.
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = (*output)->dim_size(0);
  int64 H = (*output)->dim_size(1);
  int64 W = (*output)->dim_size(2);
  int64 C = (*output)->dim_size(3);

  // Elements per batch image; each slice is transposed independently.
  int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    // Transpose a C x (H*W) row-major matrix into (H*W) x C.
    mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
                  buf_out + n * stride_n, C);
  }
}
#endif
// -------------------------------------------------------------------
#ifndef INTEL_MKL_ML_ONLY
/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
template <typename T>
static memory::data_type MklDnnType();

/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
template <>
memory::data_type MklDnnType<float>() {
  return memory::data_type::f32;
}

// Quantized unsigned 8-bit maps to MKL-DNN u8.
template <>
memory::data_type MklDnnType<quint8>() {
  return memory::data_type::u8;
}

// Quantized signed 8-bit maps to MKL-DNN s8.
template <>
memory::data_type MklDnnType<qint8>() {
  return memory::data_type::s8;
}

// Quantized signed 32-bit maps to MKL-DNN s32.
template <>
memory::data_type MklDnnType<qint32>() {
  return memory::data_type::s32;
}
/// Maps a TensorFlow data format to the corresponding MKL-DNN 5-D (3D
/// spatial) memory format. Fails with INVALID_ARGUMENT via TF_CHECK_OK on
/// any other format; format_undef is returned only to satisfy the compiler.
inline memory::format TFDataFormatToMklDnn3DDataFormat(TensorFormat format) {
  switch (format) {
    case FORMAT_NHWC:
      return memory::format::ndhwc;
    case FORMAT_NCHW:
      return memory::format::ncdhw;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      return memory::format::format_undef;
  }
}
/// Maps a TensorFlow data format to the corresponding MKL-DNN 4-D memory
/// format. Fails with INVALID_ARGUMENT via TF_CHECK_OK on any other format;
/// format_undef is returned only to satisfy the compiler.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
  switch (format) {
    case FORMAT_NHWC:
      return memory::format::nhwc;
    case FORMAT_NCHW:
      return memory::format::nchw;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      return memory::format::format_undef;
  }
}
/// Maps an MKL-DNN memory format (4-D or 5-D) back to the TensorFlow data
/// format. Fails with INVALID_ARGUMENT via TF_CHECK_OK on any other format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  const bool is_nhwc_like =
      format == memory::format::nhwc || format == memory::format::ndhwc;
  if (is_nhwc_like) return FORMAT_NHWC;

  const bool is_nchw_like =
      format == memory::format::nchw || format == memory::format::ncdhw;
  if (is_nchw_like) return FORMAT_NCHW;

  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
  // Return to prevent compiler warnings; TF_CHECK_OK above never returns.
  return FORMAT_NHWC;
}
/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively. So it will preserve the order of dimensions. E.g., if
/// input tensor is in NHWC format, then dims will be in NHWC format
/// also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  // Naive one-to-one copy: dimension order is preserved exactly as in the
  // TensorShape (e.g. NHWC stays NHWC).
  const int rank = shape.dims();
  memory::dims dims(rank);
  for (int d = 0; d < rank; ++d) dims[d] = shape.dim_size(d);
  return dims;
}
/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// This function is a specific one than above function. It will map input
/// TensorShape into MKL-DNN dims in NCHW format. So it may not preserve the
/// order of dimensions. E.g., if input tensor is in NHWC format, then dims
/// will be in NCHW format, and not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Reject unsupported formats up front.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  // Pull each logical dimension out of its format-dependent position and
  // emit them in the NCHW order MKL-DNN requires.
  const int batch = shape.dim_size(GetTensorDimIndex(format, 'N'));
  const int channels = shape.dim_size(GetTensorDimIndex(format, 'C'));
  const int rows = shape.dim_size(GetTensorDimIndex(format, 'H'));
  const int cols = shape.dim_size(GetTensorDimIndex(format, 'W'));
  return memory::dims({batch, channels, rows, cols});
}
// 3-D-spatial variant: maps a 5-D TensorShape into NCDHW order for MKL-DNN.
inline memory::dims TFShapeToMklDnnDimsInNCDHW(const TensorShape& shape,
                                               TensorFormat format) {
  // Reject unsupported formats up front.
  CHECK_NE(TFDataFormatToMklDnn3DDataFormat(format),
           memory::format::format_undef);

  // '0'/'1'/'2' index the three spatial dimensions (depth, height, width).
  const int batch = shape.dim_size(GetTensorDimIndex<3>(format, 'N'));
  const int channels = shape.dim_size(GetTensorDimIndex<3>(format, 'C'));
  const int planes = shape.dim_size(GetTensorDimIndex<3>(format, '0'));
  const int rows = shape.dim_size(GetTensorDimIndex<3>(format, '1'));
  const int cols = shape.dim_size(GetTensorDimIndex<3>(format, '2'));
  return memory::dims({batch, channels, planes, rows, cols});
}
/// Overloaded version of function above. Input parameters are
/// self-explanatory.
/// Same mapping as TFShapeToMklDnnDimsInNCHW, but starting from an existing
/// memory::dims in the given TF format rather than a TensorShape.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Reject unsupported formats up front.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  const int batch = in_dims[GetTensorDimIndex(format, 'N')];
  const int channels = in_dims[GetTensorDimIndex(format, 'C')];
  const int rows = in_dims[GetTensorDimIndex(format, 'H')];
  const int cols = in_dims[GetTensorDimIndex(format, 'W')];
  return memory::dims({batch, channels, rows, cols});
}
/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function will simply map input shape in MKL-DNN memory::dims format
/// in Tensorflow's TensorShape object by preserving dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  // Copy the MKL-DNN dims into an int32 vector, preserving order, then
  // build a TensorShape from it. MakeShape failure is treated as fatal.
  std::vector<int32> sizes(dims.begin(), dims.end());
  TensorShape shape;
  CHECK_EQ(TensorShapeUtils::MakeShape(sizes, &shape).ok(), true);
  return shape;
}
/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
/// dimesion with size 1 is outermost dimension; while dimension with size 4 is
/// innermost dimension. So strides for this tensor would be {4 * 3 * 2,
/// 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  // Strides for a dense row-major (TF-order) tensor: the innermost
  // dimension has stride 1 and each outer stride is the product of all
  // inner dimension sizes.
  CHECK_GT(dims_tf_order.size(), 0);
  const int rank = dims_tf_order.size();
  memory::dims strides(rank);
  strides[rank - 1] = 1;
  for (int d = rank - 2; d >= 0; --d) {
    strides[d] = strides[d + 1] * dims_tf_order[d + 1];
  }
  return strides;
}
// Maps a TF Padding value to an MKL-DNN padding_kind. The argument is
// intentionally ignored: MKL-DNN only supports zero padding, so every TF
// padding mode maps to padding_kind::zero.
inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
  // MKL-DNN only supports zero padding.
  return padding_kind::zero;
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
/// function such as CalculateTFStrides to compute strides
/// for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
/// for given dimensions and strides.
// Builds a memory::desc in MKL-DNN's "blocked" format for the given dims
// and strides by filling a raw mkldnn_memory_desc_t field by field. MKL-DNN
// offers no C++ API for blocked descriptors other than this C-struct
// constructor, so the field assignments below must stay in sync with the
// mkldnn_memory_desc_t layout.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                               const memory::dims& strides,
                                               memory::data_type dtype) {
  // Dims and strides must describe the same rank.
  CHECK_EQ(dim.size(), strides.size());

  // We have to construct memory descriptor in a C style. This is not at all
  // ideal but MKLDNN does not offer any API to construct descriptor in
  // blocked format except a copy constructor that accepts
  // mkldnn_memory_desc_t.
  mkldnn_memory_desc_t md;
  md.primitive_kind = mkldnn_memory;
  md.ndims = dim.size();
  md.format = mkldnn_blocked;
  md.data_type = memory::convert_to_c(dtype);

  for (size_t i = 0; i < dim.size(); i++) {
    // Block size 1 per dimension: plain (non-blocked) strided layout.
    md.layout_desc.blocking.block_dims[i] = 1;
    // strides[0] holds the outer (between-block) strides; strides[1] the
    // within-block strides, which are all 1 for block size 1.
    md.layout_desc.blocking.strides[1][i] = 1;
    md.layout_desc.blocking.strides[0][i] = strides[i];
    // No padding: padded dims equal logical dims, zero offsets.
    md.layout_desc.blocking.padding_dims[i] = dim[i];
    md.layout_desc.blocking.offset_padding_to_data[i] = 0;
    md.dims[i] = dim[i];
  }
  md.layout_desc.blocking.offset_padding = 0;

  return memory::desc(md);
}
template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to);
/*
* Class to represent all the resources corresponding to a tensor in TensorFlow
* that are required to execute an operation (such as Convolution).
*/
template <typename T>
class MklDnnData {
private:
/// MKL-DNN memory primitive for input user memory
memory* user_memory_;
/// MKL-DNN memory primitive in case input or output reorder is needed.
memory* reorder_memory_;
/// Operations memory descriptor
memory::desc* op_md_;
// flag to indicate if data is 3D or not.
bool bIs3D;
/// Operations temp buffer
void* allocated_buffer_;
/// CPU engine on which operation will be executed
const engine* cpu_engine_;
public:
explicit MklDnnData(const engine* e)
: user_memory_(nullptr),
reorder_memory_(nullptr),
op_md_(nullptr),
allocated_buffer_(nullptr),
cpu_engine_(e) {}
~MklDnnData() {
if (allocated_buffer_ != nullptr) {
cpu_allocator()->DeallocateRaw(allocated_buffer_);
}
cpu_engine_ = nullptr; // We don't own this.
delete (user_memory_);
delete (reorder_memory_);
delete (op_md_);
}
inline void* GetTensorBuffer(const Tensor* tensor) const {
CHECK_NOTNULL(tensor);
return const_cast<void*>(
static_cast<const void*>(tensor->flat<T>().data()));
}
void SetIs3DData(bool bIs3D_) { bIs3D = bIs3D_; }
bool GetIs3D() { return bIs3D; }
/// Set user memory primitive using specified dimensions, memory format and
/// data_buffer. Function automatically uses element data type by using
/// input type T used for creating call object.
///
/// In a nutshell, function allows user to describe the input tensor to
/// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and
/// memory format HWIO, and the buffer that contains actual values is
/// pointed by data_buffer.
inline void SetUsrMem(const memory::dims& dim, memory::format fm,
void* data_buffer = nullptr) {
auto md = memory::desc(dim, MklDnnType<T>(), fm);
SetUsrMem(md, data_buffer);
}
inline void SetUsrMem(const memory::dims& dim, memory::format fm,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(dim, fm, GetTensorBuffer(tensor));
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
/// function such as CalculateTFStrides to compute strides
/// for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
/// for given dimensions and strides.
static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
const memory::dims& strides) {
return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
}
/// A version of SetUsrMem call that allows user to create memory in blocked
/// format. So in addition to accepting dimensions, it also accepts strides.
/// This allows user to create memory for tensor in a format that is not
/// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6
/// dimensional tensor as a native format. But by using blocked format, a user
/// can create memory for 6D tensor.
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
void* data_buffer = nullptr) {
CHECK_EQ(dim.size(), strides.size());
auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
SetUsrMem(blocked_md, data_buffer);
}
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(dim, strides, GetTensorBuffer(tensor));
}
/// A version of function to set user memory primitive that accepts memory
/// descriptor directly, instead of accepting dimensions and format. This
/// function is more generic that the one above, but the function above is
/// sufficient in most cases.
inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
auto pd = memory::primitive_desc(md, *cpu_engine_);
SetUsrMem(pd, data_buffer);
}
/// A version of SetUsrMem with memory descriptor and tensor
inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(md, GetTensorBuffer(tensor));
}
/// A version of function to set user memory primitive that accepts primitive
/// descriptor directly, instead of accepting dimensions and format. This
/// function is more generic that the one above, but the function above is
/// sufficient in most cases.
inline void SetUsrMem(const memory::primitive_desc& pd,
void* data_buffer = nullptr) {
CHECK_NOTNULL(cpu_engine_);
// TODO(nhasabni): can we remove dynamic memory allocation?
if (data_buffer) {
user_memory_ = new memory(pd, data_buffer);
} else {
user_memory_ = new memory(pd);
}
}
/// A version of SetUsrMem with primitive descriptor and tensor
inline void SetUsrMem(const memory::primitive_desc& pd,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(pd, GetTensorBuffer(tensor));
}
/// Get function for user memory primitive.
inline const memory* GetUsrMem() const { return user_memory_; }
/// Get function for primitive descriptor of user memory primitive.
inline const memory::primitive_desc GetUsrMemPrimDesc() const {
CHECK_NOTNULL(user_memory_);
return user_memory_->get_primitive_desc();
}
/// Get function for descriptor of user memory.
inline memory::desc GetUsrMemDesc() {
// This is ugly. Why MKL-DNN does not provide desc() method of const type??
const memory::primitive_desc pd = GetUsrMemPrimDesc();
return const_cast<memory::primitive_desc*>(&pd)->desc();
}
/// Get function for data buffer of user memory primitive.
inline void* GetUsrMemDataHandle() const {
CHECK_NOTNULL(user_memory_);
return user_memory_->get_data_handle();
}
/// Set function for data buffer of user memory primitive.
inline void SetUsrMemDataHandle(void* data_buffer) {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(data_buffer);
user_memory_->set_data_handle(data_buffer);
}
/// Set function for data buffer of user memory primitive.
inline void SetUsrMemDataHandle(const Tensor* tensor) {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(tensor);
user_memory_->set_data_handle(GetTensorBuffer(tensor));
}
/// allocate function for data buffer
inline void AllocateBuffer(size_t size) {
const int64 kMemoryAlginment = 64; // For AVX512 memory alignment.
allocated_buffer_ = cpu_allocator()->AllocateRaw(kMemoryAlginment, size);
}
inline void* GetAllocatedBuffer() { return allocated_buffer_; }
/// Get the memory primitive for input and output of an op. If inputs
/// to an op require reorders, then this function returns memory primitive
/// for reorder. Otherwise, it will return memory primitive for user memory.
///
/// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
/// execute Conv2D, we need memory primitive for I and F. But if reorder is
/// required for I and F (say I_r is reorder primitive for I; F_r is reorder
/// primitive for F), then we need I_r and F_r to perform Conv2D.
inline const memory& GetOpMem() const {
  // reorder_memory_ is only set when a reorder was prepared/performed.
  return reorder_memory_ ? *reorder_memory_ : *user_memory_;
}
/// Set memory descriptor of an operation in terms of dimensions and memory
/// format. E.g., For Conv2D, the dimensions would be same as user dimensions
/// but memory::format would be mkldnn::any because we want MKL-DNN to choose
/// best layout/format for given input dimensions.
inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
  // TODO(nhasabni): can we remove dynamic memory allocation?
  // NOTE(review): a previous op_md_ is not freed here — confirm this is only
  // called once per object, otherwise it leaks.
  op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
}
/// Get function for memory descriptor for an operation.
/// Requires SetOpMemDesc to have been called first (op_md_ non-null).
inline const memory::desc& GetOpMemDesc() const { return *op_md_; }
/// Predicate that checks whether the user's memory must be reordered into
/// the memory described by op_pd.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
///                 operation
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
  CHECK_NOTNULL(user_memory_);
  const memory::primitive_desc user_pd = user_memory_->get_primitive_desc();
  return op_pd != user_pd;
}
/// Predicate that checks whether the user's memory must be reordered to
/// match the provided memory format.
///
/// @input: target_format - memory format of the given input of an
///                         operation
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool IsReorderNeeded(const memory::format& target_format) const {
  CHECK_NOTNULL(user_memory_);
  const auto current_format =
      user_memory_->get_primitive_desc().desc().data.format;
  return current_format != target_format;
}
/// Function to create a reorder from memory pointed by from to memory pointed
/// by to. Returns created primitive (not yet submitted to any stream).
inline primitive CreateReorder(const memory* from, const memory* to) const {
  CHECK_NOTNULL(from);
  CHECK_NOTNULL(to);
  return reorder(*from, *to);
}
/// Function to handle input reordering
///
/// Check if we need to reorder this input of an operation.
/// Return true and allocate reorder memory primitive if reorder is needed.
/// Otherwise, return false and do not allocate reorder memory primitive.
///
/// To check if reorder is needed, this function compares memory primitive
/// descriptor of an operation (op_pd) for the given input with the
/// user-specified memory primitive descriptor.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
///                 operation
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                std::vector<primitive>* net) {
  CHECK_NOTNULL(net);
  CHECK_NOTNULL(user_memory_);
  if (IsReorderNeeded(op_pd)) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    // NOTE(review): reorder_memory_ is heap-allocated; its release is not
    // visible in this fragment — confirm destructor frees it.
    reorder_memory_ = new memory(op_pd);
    net->push_back(CreateReorder(user_memory_, reorder_memory_));
    return true;
  }
  return false;
}
/// TODO: this is a faster path with reorder primitive cache compared with
/// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
/// slow path in the future.
///
/// If a reorder is needed, allocates reorder_memory_ and executes the
/// (cached) reorder primitive immediately on an eager stream.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd) {
  CHECK_NOTNULL(user_memory_);
  if (IsReorderNeeded(op_pd)) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    // primitive reuse doesn't allow two of the same reorder prim in
    // one stream, so submit it immediately
    reorder_memory_ = new memory(op_pd);
    std::vector<primitive> net;
    net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
    stream(stream::kind::eager).submit(net).wait();
    return true;
  }
  return false;
}
/// Overloaded version of above function that accepts memory buffer
/// where output of reorder needs to be stored.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
///                 operation
/// @reorder_data_handle - memory buffer where output of reorder needs to be
///                        stored. Primitive does not check if buffer is
///                        of sufficient size to write into.
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                void* reorder_data_handle,
                                std::vector<primitive>* net) {
  CHECK_NOTNULL(net);
  CHECK_NOTNULL(reorder_data_handle);
  CHECK_NOTNULL(user_memory_);
  if (IsReorderNeeded(op_pd)) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    // Reorder output is written into the caller-provided buffer.
    reorder_memory_ = new memory(op_pd, reorder_data_handle);
    net->push_back(CreateReorder(user_memory_, reorder_memory_));
    return true;
  }
  return false;
}
/// TODO: this is a faster path with reorder primitive cache compared with
/// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
/// slow path in the future.
///
/// Same as the buffer-accepting overload above, but executes the cached
/// reorder primitive immediately on an eager stream.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                void* reorder_data_handle) {
  CHECK_NOTNULL(reorder_data_handle);
  CHECK_NOTNULL(user_memory_);
  if (IsReorderNeeded(op_pd)) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    // primitive reuse doesn't allow two of the same reorder prim in
    // one stream, so submit it immediately
    std::vector<primitive> net;
    reorder_memory_ = new memory(op_pd, reorder_data_handle);
    net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
    stream(stream::kind::eager).submit(net).wait();
    return true;
  }
  return false;
}
/// Tensor-accepting overload of CheckReorderToOpMem: the tensor's buffer is
/// used to store the reorder output. The primitive does not check whether
/// the buffer is large enough.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
///                 operation
/// @reorder_tensor - Tensor whose buffer receives the reorder output.
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                Tensor* reorder_tensor,
                                std::vector<primitive>* net) {
  CHECK_NOTNULL(net);
  CHECK_NOTNULL(reorder_tensor);
  void* output_buffer = GetTensorBuffer(reorder_tensor);
  return CheckReorderToOpMem(op_pd, output_buffer, net);
}
/// TODO: this is a faster path with reorder primitive cache compared with
/// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
/// slow path in the future.
///
/// Tensor-accepting overload of the fast (eagerly executed) path.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                Tensor* reorder_tensor) {
  CHECK_NOTNULL(reorder_tensor);
  void* output_buffer = GetTensorBuffer(reorder_tensor);
  return CheckReorderToOpMem(op_pd, output_buffer);
}
/// Function to handle output reorder
///
/// This function performs very similar functionality as input reordering
/// function above. The only difference is that this function does not add
/// reorder primitive to the net. The reason for this is: the reorder
/// primitive for output needs to be added to the list only after operation
/// has executed. But we need to prepare a temporary buffer in case output
/// reorder is needed. And this temporary buffer will hold the output of
/// an operation before it is fed to reorder primitive.
///
/// @input: op_pd - memory primitive descriptor for the given output of an
///                 operation
/// @return: true in case reorder of output is needed; false, otherwise.
inline bool PrepareReorderToUserMemIfReq(
    const memory::primitive_desc& op_pd) {
  CHECK_NOTNULL(user_memory_);
  if (IsReorderNeeded(op_pd)) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    // Temporary buffer that will hold the op output before reordering.
    reorder_memory_ = new memory(op_pd);
    return true;
  }
  return false;
}
/// Function to actually insert reorder primitive in the net
///
/// This function completes remaining part of output reordering. It inserts
/// a reordering primitive from the temporary buffer that holds the output
/// to the user-specified output buffer.
/// Requires a prior successful PrepareReorderToUserMemIfReq call.
///
/// @input: net - net to which to add reorder primitive
inline void InsertReorderToUserMem(std::vector<primitive>* net) {
  CHECK_NOTNULL(net);
  CHECK_NOTNULL(user_memory_);
  CHECK_NOTNULL(reorder_memory_);
  net->push_back(CreateReorder(reorder_memory_, user_memory_));
}
/// TODO: this is a faster path with reorder primitive cache compared with
/// InsertReorderToUserMem(std::vector<primitive>* net), will remove
/// slow path in the future.
///
/// Executes the cached output reorder immediately on an eager stream.
inline void InsertReorderToUserMem() {
  CHECK_NOTNULL(user_memory_);
  CHECK_NOTNULL(reorder_memory_);
  // primitive reuse doesn't allow two of the same reorder prim in
  // one stream, so submit it immediately
  std::vector<primitive> net;
  net.push_back(FindOrCreateReorder<T>(reorder_memory_, user_memory_));
  stream(stream::kind::eager).submit(net).wait();
}
};
/// Base class for operations with reuse of primitives
///
/// Cached primitives (see MklPrimitiveFactory) derive from this class so
/// they can be stored and deleted polymorphically.
class MklPrimitive {
 public:
  virtual ~MklPrimitive() {}

  // Dummy data which MKL DNN never operates on; used as a placeholder
  // data handle that gets rebound to real buffers before execution.
  unsigned char* DummyData = nullptr;
};
const mkldnn::memory::dims NONE_DIMS = {};
// Factory/pool of cached MklPrimitive objects, keyed by string.
// The underlying map is thread_local, so each thread keeps its own cache
// and no locking is required.
template <typename T>
class MklPrimitiveFactory {
 public:
  MklPrimitiveFactory() {}

  ~MklPrimitiveFactory() {}

  // Look up a cached primitive by key; returns nullptr when absent.
  MklPrimitive* GetOp(const string& key) {
    auto& map = MklPrimitiveFactory<T>::GetHashMap();
    auto stream_iter = map.find(key);
    if (stream_iter == map.end()) {
      return nullptr;
    } else {
      CHECK(stream_iter->second != nullptr) << "nullptr present in map";
      return stream_iter->second;
    }
  }

  // Insert a primitive under `key`; the key must not already be present.
  void SetOp(const string& key, MklPrimitive* op) {
    auto& map = MklPrimitiveFactory<T>::GetHashMap();
    auto stream_iter = map.find(key);
    CHECK(stream_iter == map.end());
    map[key] = op;
  }

  /// Function to decide whether HW has AVX512 or AVX2
  /// For those legacy device(w/o AVX512 and AVX2),
  /// MKL-DNN GEMM will be used.
  static inline bool IsLegacyPlatform() {
    return (!port::TestCPUFeature(port::CPUFeature::AVX512F) &&
            !port::TestCPUFeature(port::CPUFeature::AVX2));
  }

  /// Function to check whether primitive memory optimization is enabled
  /// (controlled by env var TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE, default true).
  static inline bool IsPrimitiveMemOptEnabled() {
    bool is_primitive_mem_opt_enabled = true;
    TF_CHECK_OK(ReadBoolFromEnvVar("TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE", true,
                                   &is_primitive_mem_opt_enabled));
    return is_primitive_mem_opt_enabled;
  }

 private:
  // Per-thread key -> primitive cache.
  static inline std::unordered_map<string, MklPrimitive*>& GetHashMap() {
    static thread_local std::unordered_map<string, MklPrimitive*> map_;
    return map_;
  }
};
// Utility class that builds lookup keys for the MKL primitive pool by
// concatenating the raw-byte representation of each field, with an 'x'
// delimiter after every field.
class FactoryKeyCreator {
 public:
  FactoryKeyCreator() { key_.reserve(kMaxKeyLength); }

  ~FactoryKeyCreator() {}

  // Append a string field to the key.
  void AddAsKey(const string& str) { Append(str); }

  // Append every dimension of a dims vector to the key.
  void AddAsKey(const mkldnn::memory::dims& dims) {
    for (int dim : dims) {
      AddAsKey<int>(dim);
    }
  }

  // Append the raw bytes of an arbitrary value to the key.
  template <typename T>
  void AddAsKey(const T data) {
    auto buffer = reinterpret_cast<const char*>(&data);
    Append(StringPiece(buffer, sizeof(T)));
  }

  // Return the accumulated key.
  string GetKey() { return key_; }

 private:
  string key_;
  const char delimiter = 'x';
  const int kMaxKeyLength = 256;

  // Append the bytes of s, followed by the delimiter.
  void Append(StringPiece s) {
    key_.append(string(s));
    key_.append(1, delimiter);
  }
};
// Pick the preferred MKL-DNN blocked memory format for the available ISA:
// 16-channel blocking on AVX512, 8-channel blocking on AVX2 (2D only),
// plain nchw/ncdhw otherwise.
// NOTE(review): the AVX512 branch does not check (channel % 16) == 0 the way
// the AVX2 branch checks (channel % 8) == 0 — confirm this asymmetry is
// intentional for nChw16c.
static inline memory::format get_desired_format(int channel,
                                                bool is_2d = true) {
  memory::format fmt_desired = memory::format::any;

  if (port::TestCPUFeature(port::CPUFeature::AVX512F)) {
    fmt_desired = is_2d ? memory::format::nChw16c : memory::format::nCdhw16c;
  } else if (port::TestCPUFeature(port::CPUFeature::AVX2) &&
             (channel % 8) == 0) {
    fmt_desired = is_2d ? memory::format::nChw8c
                        : memory::format::ncdhw;  // no avx2 support for 3d yet.
  } else {
    fmt_desired = is_2d ? memory::format::nchw : memory::format::ncdhw;
  }
  return fmt_desired;
}
// Cacheable reorder primitive. The src/dst memory objects are created once
// with placeholder DummyData handles; SetMemory rebinds them to the actual
// buffers before each execution, so the same primitive can be reused.
class MklReorderPrimitive : public MklPrimitive {
 public:
  explicit MklReorderPrimitive(const memory* from, const memory* to) {
    Setup(from, to);
  }

  ~MklReorderPrimitive() {}

  std::shared_ptr<primitive> GetPrimitive() { return context_.reorder_prim; }

  // Rebind the cached src/dst memories to the given buffers.
  void SetMemory(const memory* from, const memory* to) {
    context_.src_mem->set_data_handle(from->get_data_handle());
    context_.dst_mem->set_data_handle(to->get_data_handle());
  }

 private:
  struct ReorderContext {
    std::shared_ptr<mkldnn::memory> src_mem;
    std::shared_ptr<mkldnn::memory> dst_mem;
    std::shared_ptr<primitive> reorder_prim;
    ReorderContext()
        : src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) {}
  } context_;

  engine cpu_engine_ = engine(engine::cpu, 0);

  // Build src/dst memories (with placeholder data handles) and the reorder
  // primitive connecting them.
  void Setup(const memory* from, const memory* to) {
    context_.src_mem.reset(new memory(
        {from->get_primitive_desc().desc(), cpu_engine_}, DummyData));
    context_.dst_mem.reset(
        new memory({to->get_primitive_desc().desc(), cpu_engine_}, DummyData));
    context_.reorder_prim = std::make_shared<mkldnn::reorder>(
        reorder(*context_.src_mem, *context_.dst_mem));
  }
};
// Factory that caches MklReorderPrimitive objects keyed by the full layout
// description (format, data type, dims, strides) of both endpoints.
template <typename T>
class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> {
 public:
  // Return a cached reorder for (from, to), creating and caching one on
  // first use. Always rebinds the primitive to the current data buffers.
  static MklReorderPrimitive* Get(const memory* from, const memory* to) {
    auto reorderPrim = static_cast<MklReorderPrimitive*>(
        MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to));
    if (reorderPrim == nullptr) {
      reorderPrim = new MklReorderPrimitive(from, to);
      MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(from, to,
                                                              reorderPrim);
    }
    reorderPrim->SetMemory(from, to);
    return reorderPrim;
  }

  static MklReorderPrimitiveFactory& GetInstance() {
    static MklReorderPrimitiveFactory instance_;
    return instance_;
  }

 private:
  MklReorderPrimitiveFactory() {}
  ~MklReorderPrimitiveFactory() {}

  // Build a cache key from the format, data type, dims and first-level
  // blocking strides of both memories.
  static string CreateKey(const memory* from, const memory* to) {
    string prefix = "reorder";
    FactoryKeyCreator key_creator;
    auto const& from_desc = from->get_primitive_desc().desc().data;
    auto const& to_desc = to->get_primitive_desc().desc().data;
    // Index of the first (outer) stride level in the blocking descriptor.
    const int KIdxFirstStride = 0;
    memory::dims from_dims(from_desc.dims, &from_desc.dims[from_desc.ndims]);
    memory::dims to_dims(to_desc.dims, &to_desc.dims[to_desc.ndims]);
    memory::dims from_strides(
        from_desc.layout_desc.blocking.strides[KIdxFirstStride],
        &from_desc.layout_desc.blocking
             .strides[KIdxFirstStride][from_desc.ndims]);
    memory::dims to_strides(
        to_desc.layout_desc.blocking.strides[KIdxFirstStride],
        &to_desc.layout_desc.blocking.strides[KIdxFirstStride][to_desc.ndims]);
    key_creator.AddAsKey(prefix);
    key_creator.AddAsKey(static_cast<int>(from_desc.format));
    key_creator.AddAsKey(static_cast<int>(from_desc.data_type));
    key_creator.AddAsKey(from_dims);
    key_creator.AddAsKey(from_strides);
    key_creator.AddAsKey(static_cast<int>(to_desc.format));
    key_creator.AddAsKey(static_cast<int>(to_desc.data_type));
    key_creator.AddAsKey(to_dims);
    key_creator.AddAsKey(to_strides);
    return key_creator.GetKey();
  }

  MklPrimitive* GetReorder(const memory* from, const memory* to) {
    string key = CreateKey(from, to);
    return this->GetOp(key);
  }

  void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
    string key = CreateKey(from, to);
    this->SetOp(key, op);
  }
};
/// Function to find (or create) a reorder primitive from the memory pointed
/// to by `from` to the memory pointed to by `to`. The primitive is fetched
/// from the per-thread pool when cached, otherwise created and cached.
/// Returns the primitive.
template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to) {
  CHECK_NOTNULL(from);
  CHECK_NOTNULL(to);
  auto* cached_reorder = MklReorderPrimitiveFactory<T>::Get(from, to);
  return *(cached_reorder->GetPrimitive());
}
// Utility predicate: true iff the filter describes a 1x1 convolution whose
// stride is not 1 in at least one spatial dimension. Used to temporarily
// disable primitive reuse for that case.
inline bool IsConv1x1StrideNot1(memory::dims filter_dims,
                                memory::dims strides) {
  if (filter_dims.size() != 4 || strides.size() != 2) return false;
  const bool is_1x1_filter = (filter_dims[2] == 1) && (filter_dims[3] == 1);
  const bool has_non_unit_stride = (strides[0] != 1) || (strides[1] != 1);
  return is_1x1_filter && has_non_unit_stride;
}
#endif // INTEL_MKL_DNN
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
|
GB_unop__identity_int32_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int32_fc64
// op(A') function: GB_unop_tran__identity_int32_fc64
// C type: int32_t
// A type: GxB_FC64_t
// cast: int32_t cij = GB_cast_to_int32_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = GB_cast_to_int32_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = GB_cast_to_int32_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the IDENTITY unary op elementwise,
// casting each double-complex entry to int32 via creal().
// Auto-generated; behavior must match the generic case exactly.
GrB_Info GB_unop_apply__identity_int32_fc64
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,                // number of entries
    int nthreads                // OpenMP thread count for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        // cast drops the imaginary part, keeping only creal(aij)
        int32_t z = GB_cast_to_int32_t (creal (aij)) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the shared template GB_unop_transpose.c,
// specialized through the GB_* macros defined above in this file.
GrB_Info GB_unop_tran__identity_int32_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
rgb2yuv_sse.c | /**
* @file rgb2yuv_sse.c
*
* RGB2YUV sse
*/
#include "rgb2yuv.h"
#include <xmmintrin.h>
#include <emmintrin.h>
#include <tmmintrin.h>
/* In-place RGB -> YUV conversion using SSE/SSSE3 intrinsics.
 * Processes 8 pixels (32 bytes, 4 bytes per pixel) per iteration.
 * NOTE(review): any trailing count % 8 pixels are left unconverted —
 * confirm callers guarantee count is a multiple of 8. */
void rgb2yuv_sse(uint8_t *pixels, int count)
{
    int i;
    uint8_t *p = pixels;
    /* Byte-lane masks used to isolate channels after the shifts below. */
    __m128i v_byte1 = _mm_set1_epi32(0x000000ff);
    __m128i v_byte3 = _mm_set1_epi32(0x00ff0000);
    /* Fixed-point (x256) conversion matrix coefficients plus rounding/bias
     * terms (4096 = 16<<8, 32768 = 128<<8). */
    __m128i v_mat_00 = _mm_set1_epi16((short int)47);
    __m128i v_mat_01 = _mm_set1_epi16((short int)157);
    __m128i v_mat_02 = _mm_set1_epi16((short int)16);
    __m128i v_mat_03 = _mm_set1_epi16((short int)4096);
    __m128i v_mat_04 = _mm_set1_epi16((short int)-26);
    __m128i v_mat_05 = _mm_set1_epi16((short int)-87);
    __m128i v_mat_06 = _mm_set1_epi16((short int)112);
    __m128i v_mat_07 = _mm_set1_epi16((short int)32768);
    __m128i v_mat_08 = _mm_set1_epi16((short int)112);
    __m128i v_mat_09 = _mm_set1_epi16((short int)-102);
    __m128i v_mat_10 = _mm_set1_epi16((short int)-10);
    __m128i v_mat_11 = _mm_set1_epi16((short int)32768);
    __m128i mask2 = _mm_set1_epi32(0x00ff00ff);
    /* pshufb control masks (128 => zero the lane) that scatter the Y, U and
     * V bytes back into their interleaved output positions. */
    __m128i mask_y1 = _mm_set_epi8((char)128, (char)128, 12, (char)128, (char)128, (char)128, 8, (char)128,
                                   (char)128, (char)128, 4, (char)128, (char)128, (char)128, 0, (char)128);
    __m128i mask_y2 = _mm_set_epi8((char)128, (char)128, 14, (char)128, (char)128, (char)128, 10, (char)128,
                                   (char)128, (char)128, 6, (char)128, (char)128, (char)128, 2, (char)128);
    __m128i mask_u1 = _mm_set_epi8((char)128, 12, (char)128, (char)128, (char)128, 8, (char)128, (char)128,
                                   (char)128, 4, (char)128, (char)128, (char)128, 0, (char)128, (char)128);
    __m128i mask_u2 = _mm_set_epi8((char)128, 14, (char)128, (char)128, (char)128, 10, (char)128, (char)128,
                                   (char)128, 6, (char)128, (char)128, (char)128, 2, (char)128, (char)128);
    __m128i mask_v1 = _mm_set_epi8(12, (char)128, (char)128, (char)128, 8, (char)128, (char)128, (char)128,
                                   4, (char)128, (char)128, (char)128, 0, (char)128, (char)128, (char)128);
    __m128i mask_v2 = _mm_set_epi8(14, (char)128, (char)128, (char)128, 10, (char)128, (char)128, (char)128,
                                   6, (char)128, (char)128, (char)128, 2, (char)128, (char)128, (char)128);
    // #pragma omp parallel for
    for (i=0; i<count / 8; i++) {
        __m128i a1, a2, r, g, b, y, u, v, res;
        /* Load 8 interleaved pixels (2 x 16 bytes). */
        a1 = _mm_loadu_si128((__m128i *)&p[i*32]);
        a2 = _mm_loadu_si128((__m128i *)&p[i*32 + 16]);
        /* De-interleave: byte-shift each half so the R, G and B samples of
         * both halves land in alternating 16-bit lanes of one register. */
        r = _mm_or_si128(_mm_and_si128(_mm_srli_si128(a1, 1), v_byte1), _mm_and_si128(_mm_slli_si128(a2, 1), v_byte3));
        g = _mm_or_si128(_mm_and_si128(_mm_srli_si128(a1, 2), v_byte1), _mm_and_si128(a2, v_byte3));
        b = _mm_or_si128(_mm_and_si128(_mm_srli_si128(a1, 3), v_byte1), _mm_and_si128(_mm_srli_si128(a2, 1), v_byte3));
        /* Y = (47*R + 157*G + 16*B + 4096) >> 8, then keep low bytes. */
        y = _mm_add_epi16(
                _mm_add_epi16(
                    _mm_mullo_epi16(r, v_mat_00),
                    _mm_mullo_epi16(g, v_mat_01)),
                _mm_add_epi16(
                    _mm_mullo_epi16(b, v_mat_02),
                    v_mat_03));
        y = _mm_and_si128(_mm_srai_epi16(y, 8), mask2);
        /* U = (-26*R - 87*G + 112*B + 32768) >> 8 */
        u = _mm_add_epi16(
                _mm_add_epi16(
                    _mm_mullo_epi16(r, v_mat_04),
                    _mm_mullo_epi16(g, v_mat_05)),
                _mm_add_epi16(
                    _mm_mullo_epi16(b, v_mat_06),
                    v_mat_07));
        u = _mm_and_si128(_mm_srai_epi16(u, 8), mask2);
        /* V = (112*R - 102*G - 10*B + 32768) >> 8 */
        v = _mm_add_epi16(
                _mm_add_epi16(
                    _mm_mullo_epi16(r, v_mat_08),
                    _mm_mullo_epi16(g, v_mat_09)),
                _mm_add_epi16(
                    _mm_mullo_epi16(b, v_mat_10),
                    v_mat_11));
        v = _mm_and_si128(_mm_srai_epi16(v, 8), mask2);
        /* Re-interleave Y/U/V bytes and store both 16-byte halves. */
        res = _mm_or_si128(_mm_shuffle_epi8(y, mask_y1), _mm_shuffle_epi8(u, mask_u1));
        res = _mm_or_si128(res, _mm_shuffle_epi8(v, mask_v1));
        _mm_storeu_si128((__m128i *)&p[i*32], res);
        res = _mm_or_si128(_mm_shuffle_epi8(y, mask_y2), _mm_shuffle_epi8(u, mask_u2));
        res = _mm_or_si128(res, _mm_shuffle_epi8(v, mask_v2));
        _mm_storeu_si128((__m128i *)&p[i*32 + 16], res);
    }
} |
deriche.c | /**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* deriche.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "deriche.h"
/* Array initialization. */
/* Fill imgIn with a deterministic pseudo-random grayscale pattern in [0,1]
   and set the filter parameter alpha. imgOut is intentionally left
   uninitialized: kernel_deriche overwrites it completely. */
static
void init_array (int w, int h, DATA_TYPE* alpha,
                 DATA_TYPE POLYBENCH_2D(imgIn, W, H, w, h),
                 DATA_TYPE POLYBENCH_2D(imgOut, W, H, w, h))
{
  int i, j;

  *alpha = 0.25; //parameter of the filter

  //input should be between 0 and 1 (grayscale image pixel)
  for (i = 0; i < w; i++)
    for (j = 0; j < h; j++)
      imgIn[i][j] = (DATA_TYPE) ((313 * i + 991 * j) % 65536) / 65535.0f;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump every element of imgOut to POLYBENCH_DUMP_TARGET, 20 values per line.
   Serves as dead-code-elimination prevention and output verification. */
static
void print_array(int w, int h,
                 DATA_TYPE POLYBENCH_2D(imgOut, W, H, w, h))
{
  int i, j;

  POLYBENCH_DUMP_START;
  POLYBENCH_DUMP_BEGIN("imgOut");
  for (i = 0; i < w; i++)
    for (j = 0; j < h; j++)
    {
      if ((i * h + j) % 20 == 0) fprintf(POLYBENCH_DUMP_TARGET, "\n");
      fprintf(POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, imgOut[i][j]);
    }
  POLYBENCH_DUMP_END("imgOut");
  POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Original code provided by Gael Deest */
/* Deriche recursive 2-D filter (OpenMP-parallel version).
 *
 * Six stages: (1) causal and (2) anti-causal recurrences along each row into
 * y1/y2, (3) combine into imgOut; then (4) causal and (5) anti-causal
 * recurrences along each column, (6) final combine. Each omp loop
 * parallelizes over independent rows (or columns); the inner recurrence is
 * inherently sequential and carried in the private accumulators. */
static
void kernel_deriche(int w, int h, DATA_TYPE alpha,
                    DATA_TYPE POLYBENCH_2D(imgIn, W, H, w, h),
                    DATA_TYPE POLYBENCH_2D(imgOut, W, H, w, h),
                    DATA_TYPE POLYBENCH_2D(y1, W, H, w, h),
                    DATA_TYPE POLYBENCH_2D(y2, W, H, w, h))
{
    int i, j;
    DATA_TYPE xm1, tm1, ym1, ym2;
    DATA_TYPE xp1, xp2;
    DATA_TYPE tp1, tp2;
    DATA_TYPE yp1, yp2;
    DATA_TYPE k;
    DATA_TYPE a1, a2, a3, a4, a5, a6, a7, a8;
    DATA_TYPE b1, b2, c1, c2;

    /* Filter coefficients derived from alpha (standard Deriche smoother). */
    k = (SCALAR_VAL(1.0) - EXP_FUN(-alpha)) * (SCALAR_VAL(1.0) - EXP_FUN(-alpha)) / (SCALAR_VAL(1.0) + SCALAR_VAL(2.0) * alpha * EXP_FUN(-alpha) - EXP_FUN(SCALAR_VAL(2.0) * alpha));
    a1 = a5 = k;
    a2 = a6 = k * EXP_FUN(-alpha) * (alpha - SCALAR_VAL(1.0));
    a3 = a7 = k * EXP_FUN(-alpha) * (alpha + SCALAR_VAL(1.0));
    a4 = a8 = -k * EXP_FUN(SCALAR_VAL(-2.0) * alpha);
    b1 = POW_FUN(SCALAR_VAL(2.0), -alpha);
    b2 = -EXP_FUN(SCALAR_VAL(-2.0) * alpha);
    c1 = c2 = 1;

    /* Stage 1: causal (left-to-right) pass along each row into y1. */
    #pragma omp parallel for default(shared) private(i, j, ym1, ym2, xm1) firstprivate(w, h, a1, a2, b1, b2, imgIn)
    for (i = 0; i < _PB_W; i++)
    {
        ym1 = SCALAR_VAL(0.0);
        ym2 = SCALAR_VAL(0.0);
        xm1 = SCALAR_VAL(0.0);
        for (j = 0; j < _PB_H; j++)
        {
            y1[i][j] = a1 * imgIn[i][j] + a2 * xm1 + b1 * ym1 + b2 * ym2;
            xm1 = imgIn[i][j];
            ym2 = ym1;
            ym1 = y1[i][j];
        }
    }

    /* Stage 2: anti-causal (right-to-left) pass along each row into y2. */
    #pragma omp parallel for default(shared) private(i, j, yp1, yp2, xp1, xp2) firstprivate(w, h, a3, a4, b1, b2, imgIn)
    for (i = 0; i < _PB_W; i++)
    {
        yp1 = SCALAR_VAL(0.0);
        yp2 = SCALAR_VAL(0.0);
        xp1 = SCALAR_VAL(0.0);
        xp2 = SCALAR_VAL(0.0);
        for (j = _PB_H - 1; j >= 0; j--)
        {
            y2[i][j] = a3 * xp1 + a4 * xp2 + b1 * yp1 + b2 * yp2;
            xp2 = xp1;
            xp1 = imgIn[i][j];
            yp2 = yp1;
            yp1 = y2[i][j];
        }
    }

    /* Stage 3: combine the horizontal passes into imgOut. */
    #pragma omp parallel for default(shared) private(i, j) firstprivate(w, h, c1, y1, y2)
    for (i = 0; i < _PB_W; i++)
    {
        for (j = 0; j < _PB_H; j++)
        {
            imgOut[i][j] = c1 * (y1[i][j] + y2[i][j]);
        }
    }

    /* Stage 4: causal (top-to-bottom) pass along each column into y1. */
    #pragma omp parallel for default(shared) private(j, i, tm1, ym1, ym2) firstprivate(h, w, a5, a6, b1, b2, imgOut)
    for (j = 0; j < _PB_H; j++)
    {
        tm1 = SCALAR_VAL(0.0);
        ym1 = SCALAR_VAL(0.0);
        ym2 = SCALAR_VAL(0.0);
        for (i = 0; i < _PB_W; i++)
        {
            y1[i][j] = a5 * imgOut[i][j] + a6 * tm1 + b1 * ym1 + b2 * ym2;
            tm1 = imgOut[i][j];
            ym2 = ym1;
            ym1 = y1[i][j];
        }
    }

    /* Stage 5: anti-causal (bottom-to-top) pass along each column into y2. */
    #pragma omp parallel for default(shared) private(j, i, tp1, tp2, yp1, yp2) firstprivate(h, w, a7, a8, b1, b2, imgOut)
    for (j = 0; j < _PB_H; j++)
    {
        tp1 = SCALAR_VAL(0.0);
        tp2 = SCALAR_VAL(0.0);
        yp1 = SCALAR_VAL(0.0);
        yp2 = SCALAR_VAL(0.0);
        for (i = _PB_W - 1; i >= 0; i--)
        {
            y2[i][j] = a7 * tp1 + a8 * tp2 + b1 * yp1 + b2 * yp2;
            tp2 = tp1;
            tp1 = imgOut[i][j];
            yp2 = yp1;
            yp1 = y2[i][j];
        }
    }

    /* Stage 6: final combine of the vertical passes. */
    #pragma omp parallel for default(shared) private(i, j) firstprivate(w, h, c2, y1, y2)
    for (i = 0; i < _PB_W; i++)
    {
        for (j = 0; j < _PB_H; j++)
            imgOut[i][j] = c2 * (y1[i][j] + y2[i][j]);
    }
}
/* Driver: allocate the PolyBench arrays, initialize, time the kernel, and
   print the result (which also prevents dead-code elimination). */
int main(int argc, char** argv)
{
  /* Retrieve problem size (compile-time W/H from deriche.h). */
  int w = W;
  int h = H;

  /* Variable declaration/allocation. */
  DATA_TYPE alpha;
  POLYBENCH_2D_ARRAY_DECL(imgIn, DATA_TYPE, W, H, w, h);
  POLYBENCH_2D_ARRAY_DECL(imgOut, DATA_TYPE, W, H, w, h);
  POLYBENCH_2D_ARRAY_DECL(y1, DATA_TYPE, W, H, w, h);
  POLYBENCH_2D_ARRAY_DECL(y2, DATA_TYPE, W, H, w, h);

  /* Initialize array(s). */
  init_array (w, h, &alpha, POLYBENCH_ARRAY(imgIn), POLYBENCH_ARRAY(imgOut));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_deriche (w, h, alpha, POLYBENCH_ARRAY(imgIn), POLYBENCH_ARRAY(imgOut), POLYBENCH_ARRAY(y1), POLYBENCH_ARRAY(y2));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(w, h, POLYBENCH_ARRAY(imgOut)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(imgIn);
  POLYBENCH_FREE_ARRAY(imgOut);
  POLYBENCH_FREE_ARRAY(y1);
  POLYBENCH_FREE_ARRAY(y2);

  return 0;
}
|
ocp_nlp_sqp.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.;
*/
#include "acados/ocp_nlp/ocp_nlp_sqp.h"
// external
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
/************************************************
* options
************************************************/
/* Return the number of bytes needed to store an ocp_nlp_sqp_opts struct
 * together with its nested generic NLP options. */
int ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int bytes = sizeof(ocp_nlp_sqp_opts);
    bytes += ocp_nlp_opts_calculate_size(config, dims);

    return bytes;
}
/* Carve an ocp_nlp_sqp_opts object (plus its nested generic NLP options)
 * out of caller-provided raw memory; the asserted bound must match
 * ocp_nlp_sqp_opts_calculate_size. */
void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    char *c_ptr = (char *) raw_memory;

    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_opts);

    opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_opts_calculate_size(config, dims);

    assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}
/* Initialize SQP solver options to their defaults: 20 iterations, 1e-8
 * tolerances, no extended QP residuals, cold-started QPs. The nested
 * generic NLP options are initialized first, then the QP solver tolerances
 * are overwritten to match the SQP tolerances. */
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    // int ii;

    // this first !!! (submodule defaults must exist before we override them)
    ocp_nlp_opts_initialize_default(config, dims, nlp_opts);

    // SQP opts
    opts->max_iter = 20;
    opts->tol_stat = 1e-8;
    opts->tol_eq = 1e-8;
    opts->tol_ineq = 1e-8;
    opts->tol_comp = 1e-8;

    opts->ext_qp_res = 0;

    opts->qp_warm_start = 0;
    opts->warm_start_first_qp = false;

    // overwrite default submodules opts
    // qp tolerance
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", &opts->tol_comp);

    return;
}
/* Propagate any option changes by delegating to the generic NLP-level
 * options update for the nested nlp_opts. */
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_opts *opts = opts_;

    ocp_nlp_opts_update(config, dims, opts->nlp_opts);

    return;
}
/* Set a single option of the SQP solver.
 *
 * A field of the form "<module>_<name>" with module "qp" is forwarded to the
 * QP layer; SQP-level fields (max_iter, tol_*, ext_qp_res,
 * warm_start_first_qp) are handled here, everything else is passed through to
 * the generic NLP opts setter. `value` must point to an object of the type
 * the field expects (int, double or bool). */
void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name: the prefix of `field` up to the first '_'
    char *char_ = strchr(field, '_');
    if (char_ != NULL)
    {
        module_length = (int) (char_ - field);
        // BUG FIX: bound the copy. Previously a prefix of length
        // >= MAX_STR_LEN overflowed the stack buffer `module`.
        // An over-long prefix cannot match "qp" anyway, so it is simply
        // treated as a plain NLP field below.
        if (module_length < MAX_STR_LEN)
        {
            for (int ii = 0; ii < module_length; ii++)
                module[ii] = field[ii];
            module[module_length] = '\0'; // add end of string
            ptr_module = module;
        }
    }

    // pass options to QP module
    if ( ptr_module != NULL && (!strcmp(ptr_module, "qp")) )
    {
        ocp_nlp_opts_set(config, nlp_opts, field, value);
        // keep a local copy of the warm start flag: it is temporarily
        // overridden in the first SQP iteration and restored afterwards
        if (!strcmp(field, "qp_warm_start"))
        {
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "max_iter"))
        {
            int* max_iter = (int *) value;
            opts->max_iter = *max_iter;
        }
        else if (!strcmp(field, "tol_stat"))
        {
            double* tol_stat = (double *) value;
            opts->tol_stat = *tol_stat;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", value);
        }
        else if (!strcmp(field, "tol_eq"))
        {
            double* tol_eq = (double *) value;
            opts->tol_eq = *tol_eq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", value);
        }
        else if (!strcmp(field, "tol_ineq"))
        {
            double* tol_ineq = (double *) value;
            opts->tol_ineq = *tol_ineq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", value);
        }
        else if (!strcmp(field, "tol_comp"))
        {
            double* tol_comp = (double *) value;
            opts->tol_comp = *tol_comp;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", value);
        }
        else if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "warm_start_first_qp"))
        {
            bool* warm_start_first_qp = (bool *) value;
            opts->warm_start_first_qp = *warm_start_first_qp;
        }
        else
        {
            // unknown fields are delegated; the NLP layer errors out on
            // genuinely invalid names
            ocp_nlp_opts_set(config, nlp_opts, field, value);
        }
    }

    return;
}
void ocp_nlp_sqp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
    /* Stage-wise options are handled entirely by the generic NLP layer. */
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;

    ocp_nlp_opts_set_at_stage(config, opts->nlp_opts, stage, field, value);
}
/************************************************
* memory
************************************************/
int ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_opts *opts = opts_;

    int size = sizeof(ocp_nlp_sqp_memory);

    // NLP residuals
    size += ocp_nlp_res_calculate_size(dims);

    // generic NLP memory
    size += ocp_nlp_memory_calculate_size(config, dims, opts->nlp_opts);

    // statistics table: one row per iteration (plus one); 6 columns, plus 4
    // extra columns when external QP residuals are recorded
    int stat_m = opts->max_iter + 1;
    int stat_n = opts->ext_qp_res ? 6 + 4 : 6;
    size += stat_n * stat_m * sizeof(double);

    size += 8;  // initial align
    make_int_multiple_of(8, &size);

    return size;
}
/* Map the SQP memory struct and its sub-memories onto raw_memory.
 * NOTE: the assignment order and sizes below must mirror
 * ocp_nlp_sqp_memory_calculate_size exactly, otherwise the final assert
 * fires (or memory is silently overrun). */
void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_cost_config **cost = config->cost;
    // ocp_nlp_constraints_config **constraints = config->constraints;
    char *c_ptr = (char *) raw_memory;
    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;
    // initial align
    align_char_to(8, &c_ptr);
    ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_memory);
    // nlp res
    mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr);
    c_ptr += mem->nlp_res->memsize;
    // nlp mem
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
    c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);
    // stat: iteration statistics table, stat_m rows x stat_n columns
    mem->stat = (double *) c_ptr;
    mem->stat_m = opts->max_iter+1;
    mem->stat_n = 6;
    if (opts->ext_qp_res)
        mem->stat_n += 4;  // extra columns for external QP residual norms
    c_ptr += mem->stat_m*mem->stat_n*sizeof(double);
    mem->status = ACADOS_READY;
    assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr);
    return mem;
}
/************************************************
* workspace
************************************************/
int ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_opts *opts = opts_;

    // SQP workspace struct itself
    int size = sizeof(ocp_nlp_sqp_workspace);

    // generic NLP workspace
    size += ocp_nlp_workspace_calculate_size(config, dims, opts->nlp_opts);

    // temporary QP in / QP out (used e.g. for parametric sensitivities)
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // optional external QP residual computation
    if (opts->ext_qp_res)
    {
        size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
        size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    return size;
}
/* Set up the pointers inside `work` into its own raw memory block.
 * NOTE: the carving order must mirror ocp_nlp_sqp_workspace_calculate_size
 * exactly. Called at the start of every solver entry point because the
 * workspace buffer may be reallocated between calls. */
static void ocp_nlp_sqp_cast_workspace(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_workspace *work)
{
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    // sqp
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_workspace);
    // nlp
    work->nlp_work = ocp_nlp_workspace_assign(config, dims, nlp_opts, nlp_mem, c_ptr);
    c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);
    // tmp qp in
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
    // tmp qp out
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
    // only allocated when external QP residuals were requested in the opts
    if (opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }
    assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);
    return;
}
/************************************************
* functions
************************************************/
/* Main SQP driver: repeatedly linearize the NLP, solve the resulting QP and
 * take a full step, until the residual tolerances, an iteration limit, or a
 * QP failure is reached.
 *
 * Returns mem->status: ACADOS_SUCCESS on convergence, ACADOS_MAXITER when
 * opts->max_iter iterations were performed without meeting the tolerances,
 * or ACADOS_QP_FAILURE when the QP solver reports an unrecoverable error. */
int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void *work_)
{
    acados_timer timer0, timer1;
    acados_tic(&timer0);
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;
    // zero timers
    double total_time = 0.0;
    double tmp_time;
    mem->time_qp_sol = 0.0;
    mem->time_qp_solver_call = 0.0;
    mem->time_lin = 0.0;
    mem->time_reg = 0.0;
    mem->time_tot = 0.0;
    int N = dims->N;
    int ii;
    int qp_iter = 0;
    int qp_status = 0;
#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->nlp_opts->num_threads);
    #pragma omp parallel
    { // beginning of parallel region
#endif
    // alias to dynamics_memory: point the per-stage dynamics modules at the
    // current iterate, the QP data and the intermediate buffers
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
    }
    // alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
    }
    // alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_ptr(nlp_mem->qp_in->idxs[ii], nlp_mem->constraints[ii]);
    }
    // alias to regularize memory
    config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
    config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);
    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute;
    // -> remove here and make sure precompute is called everywhere (e.g. Python interface).
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
            nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }
#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif
    // initialize QP
    ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
    // main sqp loop
    int sqp_iter = 0;
    nlp_mem->sqp_iter = &sqp_iter;
    for (; sqp_iter < opts->max_iter; sqp_iter++)
    {
        // linearizate NLP and update QP matrices
        acados_tic(&timer1);
        ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
        mem->time_lin += acados_toc(&timer1);
        // update QP rhs for SQP (step prim var, abs dual var)
        ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
        // compute nlp residuals
        ocp_nlp_res_compute(dims, nlp_in, nlp_out, mem->nlp_res, nlp_mem);
        // inf_norm_res := max over the four residual components
        nlp_out->inf_norm_res = mem->nlp_res->inf_norm_res_g;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_b > nlp_out->inf_norm_res) ?
            mem->nlp_res->inf_norm_res_b :
            nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_d > nlp_out->inf_norm_res) ?
            mem->nlp_res->inf_norm_res_d :
            nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_m > nlp_out->inf_norm_res) ?
            mem->nlp_res->inf_norm_res_m :
            nlp_out->inf_norm_res;
        // save statistics (columns 0-3: residual norms of this iteration)
        if (sqp_iter < mem->stat_m)
        {
            mem->stat[mem->stat_n*sqp_iter+0] = mem->nlp_res->inf_norm_res_g;
            mem->stat[mem->stat_n*sqp_iter+1] = mem->nlp_res->inf_norm_res_b;
            mem->stat[mem->stat_n*sqp_iter+2] = mem->nlp_res->inf_norm_res_d;
            mem->stat[mem->stat_n*sqp_iter+3] = mem->nlp_res->inf_norm_res_m;
        }
        // exit conditions on residuals
        // (bitwise & on comparison results: all operands are 0/1, so this
        // equals a logical AND without short-circuiting)
        if ((mem->nlp_res->inf_norm_res_g < opts->tol_stat) &
            (mem->nlp_res->inf_norm_res_b < opts->tol_eq) &
            (mem->nlp_res->inf_norm_res_d < opts->tol_ineq) &
            (mem->nlp_res->inf_norm_res_m < opts->tol_comp))
        {
            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;
            // stop timer
            total_time += acados_toc(&timer0);
            // save time
            nlp_out->total_time = total_time;
            mem->time_tot = total_time;
#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            mem->status = ACADOS_SUCCESS;
            return mem->status;
        }
        // regularize Hessian
        acados_tic(&timer1);
        config->regularize->regularize_hessian(config->regularize, dims->regularize,
            opts->nlp_opts->regularize, nlp_mem->regularize_mem);
        mem->time_reg += acados_toc(&timer1);
        // (typically) no warm start at first iteration
        if (sqp_iter == 0 && !opts->warm_start_first_qp)
        {
            int tmp_int = 0;
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts,
                "warm_start", &tmp_int);
        }
        // solve qp
        acados_tic(&timer1);
        qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out,
            opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
        mem->time_qp_sol += acados_toc(&timer1);
        qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
        mem->time_qp_solver_call += tmp_time;
        // compute correct dual solution in case of Hessian regularization
        acados_tic(&timer1);
        config->regularize->correct_dual_sol(config->regularize, dims->regularize,
            opts->nlp_opts->regularize, nlp_mem->regularize_mem);
        mem->time_reg += acados_toc(&timer1);
        // restore default warm start
        if (sqp_iter==0)
        {
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts,
                "warm_start", &opts->qp_warm_start);
        }
        // TODO move into QP solver memory ???
        qp_info *qp_info_;
        ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
        nlp_out->qp_iter = qp_info_->num_iter;
        qp_iter = qp_info_->num_iter;
        // save statistics of last qp solver call (columns 4-5 of next row)
        if (sqp_iter+1 < mem->stat_m)
        {
            mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status;
            mem->stat[mem->stat_n*(sqp_iter+1)+5] = qp_iter;
        }
        // compute external QP residuals (for debugging)
        if (opts->ext_qp_res)
        {
            ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws);
            if (sqp_iter+1 < mem->stat_m)
                ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6));
        }
        // an unrecoverable QP failure aborts the SQP loop
        // (ACADOS_MAXITER from the QP solver is tolerated)
        if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
        {
            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;
            // stop timer
            total_time += acados_toc(&timer0);
            // save time
            mem->time_tot = total_time;
            nlp_out->total_time = total_time;
            printf("QP solver returned error status %d in iteration %d\n", qp_status, sqp_iter);
#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            mem->status = ACADOS_QP_FAILURE;
            return mem->status;
        }
        // full step update of primal and dual variables
        ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
    }
    // maximum number of iterations reached without meeting the tolerances
    // stop timer
    total_time += acados_toc(&timer0);
    // save sqp iterations number
    mem->sqp_iter = sqp_iter;
    nlp_out->sqp_iter = sqp_iter;
    // save time
    mem->time_tot = total_time;
    nlp_out->total_time = total_time;
#if defined(ACADOS_WITH_OPENMP)
    // restore number of threads
    omp_set_num_threads(num_threads_bkp);
#endif
    mem->status = ACADOS_MAXITER;
    return mem->status;
}
/* One-time precomputation before the first solve: verify slack dimensions
 * against the constraint modules, then set the sampling times and run the
 * per-stage dynamics precompute. Returns ACADOS_SUCCESS or the first
 * non-success status of a dynamics precompute. */
int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void *work_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    // ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;
    int N = dims->N;
    int status = ACADOS_SUCCESS;
    int ii;
    // TODO(all) add flag to enable/disable checks
    // consistency check: ns of each stage must match the constraint module
    for (ii = 0; ii <= N; ii++)
    {
        int module_val;
        config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val);
        if (dims->ns[ii] != module_val)
        {
            printf("ocp_nlp_sqp_precompute: inconsistent dimension ns with constraint module.");
            exit(1);
        }
    }
    // precompute
    for (ii = 0; ii < N; ii++)
    {
        // set T (stage sampling time) in the dynamics model
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
            nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
        // dynamics precompute; abort on the first failure
        status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii], nlp_mem->dynamics[ii], nlp_work->dynamics[ii]);
        if (status != ACADOS_SUCCESS)
            return status;
    }
    return status;
}
/* Evaluate parametric solution sensitivities via the QP solver.
 * Currently only supports field "ex" at stage 0 (sensitivity w.r.t. the
 * initial state component `index`): a copy of the last QP with zeroed rhs and
 * a unit entry on the corresponding bound is solved, and the resulting
 * directional derivative is copied into sens_nlp_out. Exits on any other
 * field/stage combination. */
void ocp_nlp_sqp_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_,
    char *field, int stage, int index, void *sens_nlp_out_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_out *sens_nlp_out = sens_nlp_out_;
    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;
    // build the sensitivity QP: same matrices as the last QP, zero rhs
    d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
    d_ocp_qp_set_rhs_zero(work->tmp_qp_in);
    double one = 1.0;
    if ((!strcmp("ex", field)) & (stage==0))
    {
        // unit perturbation on lower and upper bound of state component `index`
        d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
        d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);
        config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out,
            opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
        /* copy tmp_qp_out into sens_nlp_out */
        int i;
        int N = dims->N;
        int *nv = dims->nv;
        int *nx = dims->nx;
        // int *nu = dims->nu;
        int *ni = dims->ni;
        // int *nz = dims->nz;
        for (i = 0; i <= N; i++)
        {
            blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);
            if (i < N)
                blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
        }
    }
    else
    {
        printf("\nerror: field %s at stage %d not available in ocp_nlp_sqp_eval_param_sens\n", field, stage);
        exit(1);
    }
    return;
}
// TODO rename memory_get ???
/* Getter for SQP solver results and internals. `return_value_` must point to
 * the type matching the field: int (sqp_iter, status, stat_m, stat_n,
 * qp_iter), double (time_*), or a pointer slot for the handle-returning
 * fields (nlp_res, stat, nlp_mem, qp_*). Unknown fields abort. */
// TODO rename memory_get ???
void ocp_nlp_sqp_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_memory *mem = mem_;
    if (!strcmp("sqp_iter", field))
    {
        int *value = return_value_;
        *value = mem->sqp_iter;
    }
    else if (!strcmp("status", field))
    {
        int *value = return_value_;
        *value = mem->status;
    }
    else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
    {
        double *value = return_value_;
        *value = mem->time_tot;
    }
    else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_sol;
    }
    else if (!strcmp("time_qp_solver_call", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_solver_call;
    }
    else if (!strcmp("time_lin", field))
    {
        double *value = return_value_;
        *value = mem->time_lin;
    }
    else if (!strcmp("time_reg", field))
    {
        double *value = return_value_;
        *value = mem->time_reg;
    }
    else if (!strcmp("nlp_res", field))
    {
        ocp_nlp_res **value = return_value_;
        *value = mem->nlp_res;
    }
    else if (!strcmp("stat", field))
    {
        double **value = return_value_;
        *value = mem->stat;
    }
    else if (!strcmp("stat_m", field))
    {
        int *value = return_value_;
        *value = mem->stat_m;
    }
    else if (!strcmp("stat_n", field))
    {
        int *value = return_value_;
        *value = mem->stat_n;
    }
    else if (!strcmp("nlp_mem", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem;
    }
    else if (!strcmp("qp_xcond_dims", field))
    {
        void **value = return_value_;
        *value = dims->qp_solver->xcond_dims;
    }
    else if (!strcmp("qp_xcond_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in;
    }
    else if (!strcmp("qp_xcond_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out;
    }
    else if (!strcmp("qp_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_in;
    }
    else if (!strcmp("qp_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_out;
    }
    else if (!strcmp("qp_iter", field))
    {
        // delegated to the QP solver's own memory getter
        config->qp_solver->memory_get(config->qp_solver, mem->nlp_mem->qp_solver_mem, "iter", return_value_);
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field);
        exit(1);
    }
}
void ocp_nlp_sqp_config_initialize_default(void *config_)
{
    /* Wire the generic NLP solver vtable to the SQP implementation. */
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    config->opts_calculate_size = ocp_nlp_sqp_opts_calculate_size;
    config->opts_assign = ocp_nlp_sqp_opts_assign;
    config->opts_initialize_default = ocp_nlp_sqp_opts_initialize_default;
    config->opts_update = ocp_nlp_sqp_opts_update;
    config->opts_set = ocp_nlp_sqp_opts_set;
    config->opts_set_at_stage = ocp_nlp_sqp_opts_set_at_stage;
    config->memory_calculate_size = ocp_nlp_sqp_memory_calculate_size;
    config->memory_assign = ocp_nlp_sqp_memory_assign;
    config->workspace_calculate_size = ocp_nlp_sqp_workspace_calculate_size;
    config->evaluate = ocp_nlp_sqp;
    config->eval_param_sens = ocp_nlp_sqp_eval_param_sens;
    config->config_initialize_default = ocp_nlp_sqp_config_initialize_default;
    config->precompute = ocp_nlp_sqp_precompute;
    config->get = ocp_nlp_sqp_get;
}
|
3964.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
/* Array initialization: fill A, B, C, D with deterministic values so the
 * benchmark output is reproducible. The divisors (ni, nj, nl, nk) follow the
 * original PolyBench reference and intentionally mix dimensions. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
    DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
    DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
    DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
    DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
    int i, j;
    for (i = 0; i < ni; i++)
        for (j = 0; j < nk; j++)
            A[i][j] = ((DATA_TYPE) i*j) / ni;
    for (i = 0; i < nk; i++)
        for (j = 0; j < nj; j++)
            B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
    for (i = 0; i < nj; i++)
        for (j = 0; j < nm; j++)
            C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
    for (i = 0; i < nm; i++)
        for (j = 0; j < nl; j++)
            D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Prints G to stderr, 20 values per line (PolyBench convention). */
static
void print_array(int ni, int nl,
    DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
    int i, j;
    for (i = 0; i < ni; i++)
        for (j = 0; j < nl; j++) {
            fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
            if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
        }
    fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: G := (A*B) * (C*D), computed as
 * E := A*B, F := C*D, G := E*F. The whole function is timed, including the
 * call and return.
 *
 * BUG FIX (review): the original OpenMP annotations were both ill-formed and
 * racy:
 *  - `#pragma omp parallel for schedule(static, 1) simd` places a clause
 *    before `simd`; the composite construct is `parallel for simd` with
 *    clauses after it.
 *  - `schedule` is not a valid clause on `target teams distribute` (that
 *    construct takes `dist_schedule`), and nesting a `target` region inside
 *    a host `parallel for` region is not meaningful here.
 *  - j and k were declared outside the parallel loops and therefore shared
 *    across threads — a data race corrupting the results.
 * The kernel now uses a single valid `parallel for` per matrix product with
 * j and k explicitly privatized; results are unchanged for a serial run and
 * now correct for a parallel one. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
    DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
    DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
    DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
    DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
    DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
    DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
    DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
    int i, j, k;
#pragma scop
    {
        /* E := A*B */
#pragma omp parallel for schedule(static, 1) private(j, k)
        for (i = 0; i < _PB_NI; i++)
        {
            for (j = 0; j < _PB_NJ; j++)
            {
                E[i][j] = 0;
                for (k = 0; k < _PB_NK; ++k)
                    E[i][j] += A[i][k] * B[k][j];
            }
        }
        /* F := C*D */
#pragma omp parallel for schedule(static, 1) private(j, k)
        for (i = 0; i < _PB_NJ; i++)
        {
            for (j = 0; j < _PB_NL; j++)
            {
                F[i][j] = 0;
                for (k = 0; k < _PB_NM; ++k)
                    F[i][j] += C[i][k] * D[k][j];
            }
        }
        /* G := E*F */
#pragma omp parallel for schedule(static, 1) private(j, k)
        for (i = 0; i < _PB_NI; i++)
        {
            for (j = 0; j < _PB_NL; j++)
            {
                G[i][j] = 0;
                for (k = 0; k < _PB_NJ; ++k)
                    G[i][j] += E[i][k] * F[k][j];
            }
        }
    }
#pragma endscop
}
/* Benchmark driver: allocate, initialize, time the kernel, then print the
 * live-out array G (to prevent dead-code elimination) and free everything. */
int main(int argc, char** argv)
{
    /* Retrieve problem size. */
    int ni = NI;
    int nj = NJ;
    int nk = NK;
    int nl = NL;
    int nm = NM;
    /* Variable declaration/allocation. */
    POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
    POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
    POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
    POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
    POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
    POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
    POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
    /* Initialize array(s). */
    init_array (ni, nj, nk, nl, nm,
        POLYBENCH_ARRAY(A),
        POLYBENCH_ARRAY(B),
        POLYBENCH_ARRAY(C),
        POLYBENCH_ARRAY(D));
    /* Start timer. */
    polybench_start_instruments;
    /* Run kernel. */
    kernel_3mm (ni, nj, nk, nl, nm,
        POLYBENCH_ARRAY(E),
        POLYBENCH_ARRAY(A),
        POLYBENCH_ARRAY(B),
        POLYBENCH_ARRAY(F),
        POLYBENCH_ARRAY(C),
        POLYBENCH_ARRAY(D),
        POLYBENCH_ARRAY(G));
    /* Stop and print timer. */
    polybench_stop_instruments;
    polybench_print_instruments;
    /* Prevent dead-code elimination. All live-out data must be printed
       by the function call in argument. */
    polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
    /* Be clean. */
    POLYBENCH_FREE_ARRAY(E);
    POLYBENCH_FREE_ARRAY(A);
    POLYBENCH_FREE_ARRAY(B);
    POLYBENCH_FREE_ARRAY(F);
    POLYBENCH_FREE_ARRAY(C);
    POLYBENCH_FREE_ARRAY(D);
    POLYBENCH_FREE_ARRAY(G);
    return 0;
}
|
pdgstrf.c |
/*! @file
* \brief Performs LU factorization in parallel
*
* <pre>
* -- Distributed SuperLU routine (version 4.3) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* October 1, 2014
*
* Modified:
* September 1, 1999
 * February 7, 2001 use MPI_Isend/MPI_Irecv
* October 15, 2008 latency-reducing panel factorization
* July 12, 2011 static scheduling and arbitrary look-ahead
* March 13, 2013 change NTAGS to MPI_TAG_UB value
* September 24, 2015 replace xLAMCH by xMACH, using C99 standard.
* December 31, 2015 rename xMACH to xMACH_DIST
*
* Sketch of the algorithm
*
* =======================
*
* The following relations hold:
* * A_kk = L_kk * U_kk
* * L_ik = Aik * U_kk^(-1)
* * U_kj = L_kk^(-1) * A_kj
*
* ----------------------------------
* | | |
* ----|-----------------------------
* | | \ U_kk| |
* | | \ | U_kj |
* | |L_kk \ | || |
* ----|-------|---------||----------
* | | | \/ |
* | | | |
* | | | |
* | | | |
* | | L_ik ==> A_ij |
* | | | |
* | | | |
* | | | |
* ----------------------------------
*
* Handle the first block of columns separately.
* * Factor diagonal and subdiagonal blocks and test for exact
* singularity. ( pdgstrf2(0), one column at a time )
* * Compute block row of U
* * Update trailing matrix
*
* Loop over the remaining blocks of columns.
* mycol = MYCOL( iam, grid );
* myrow = MYROW( iam, grid );
* N = nsupers;
* For (k = 1; k < N; ++k) {
* krow = PROW( k, grid );
* kcol = PCOL( k, grid );
* Pkk = PNUM( krow, kcol, grid );
*
* * Factor diagonal and subdiagonal blocks and test for exact
* singularity.
* if ( mycol == kcol ) {
* pdgstrf2(k), one column at a time
* }
*
* * Parallel triangular solve
* if ( iam == Pkk ) multicast L_k,k to this process row;
* if ( myrow == krow && mycol != kcol ) {
* Recv L_k,k from process Pkk;
* for (j = k+1; j < N; ++j)
* if ( PCOL( j, grid ) == mycol && A_k,j != 0 )
* U_k,j = L_k,k \ A_k,j;
* }
*
* * Parallel rank-k update
* if ( myrow == krow ) multicast U_k,k+1:N to this process column;
* if ( mycol == kcol ) multicast L_k+1:N,k to this process row;
* if ( myrow != krow ) {
* Pkj = PNUM( krow, mycol, grid );
* Recv U_k,k+1:N from process Pkj;
* }
* if ( mycol != kcol ) {
* Pik = PNUM( myrow, kcol, grid );
* Recv L_k+1:N,k from process Pik;
* }
* for (j = k+1; k < N; ++k) {
* for (i = k+1; i < N; ++i)
* if ( myrow == PROW( i, grid ) && mycol == PCOL( j, grid )
* && L_i,k != 0 && U_k,j != 0 )
* A_i,j = A_i,j - L_i,k * U_k,j;
* }
* }
*
* </pre>
*/
#include <math.h>
/*#include "mkl.h"*/
#include "superlu_ddefs.h"
#ifdef GPU_ACC
#include "cublas_utils.h"
/*#include "cublas_dgemm.h"*/
// #define NUM_CUDA_STREAMS 16
// #define NUM_CUDA_STREAMS 16
#endif
/* Various definitions */
/*
Name : SUPERNODE_PROFILE
    Purpose : For SuperNode-level profiling of various measurements such as gigaflop/sec
    obtained, bandwidth achieved:
Overhead : Low
*/
// #define SUPERNODE_PROFILE
/*
    Name : BASELINE
    Purpose : baseline to compare performance against
    Overhead : NA : this won't be used for running experiments
*/
// #define BASELINE
/*
Name : PHI_FRAMEWORK
Purpose : To simulate and test algorithm used for offloading Phi
    Overhead : NA : this won't be used for running experiments
*/
#define PHI_FRAMEWORK
#define PDGSTRF2 pdgstrf2_trsm
#define PDGSTRS2 pdgstrs2_omp
extern void PDGSTRF2 (superlu_options_t *, int_t, int_t, double,
Glu_persist_t *, gridinfo_t *, LocalLU_t *,
MPI_Request *, int, SuperLUStat_t *, int *);
#ifdef _CRAY
extern void PDGSTRS2 (int_t, int_t, Glu_persist_t *, gridinfo_t *,
LocalLU_t *, SuperLUStat_t *, _fcd, _fcd, _fcd);
#else
extern void PDGSTRS2 (int_t, int_t, Glu_persist_t *, gridinfo_t *,
LocalLU_t *, SuperLUStat_t *);
#endif
#define ISORT /* Note: qsort() has bug on Mac */
#ifdef ISORT
extern void isort (int_t N, int_t * ARRAY1, int_t * ARRAY2);
extern void isort1 (int_t N, int_t * ARRAY);
#else
/*! \brief qsort() comparator for int_t values, ascending order.
 *
 * Returns a negative, zero, or positive value as *arg1 compares less
 * than, equal to, or greater than *arg2, as qsort() requires.  The
 * previous version returned only 0 or 1 (never a negative value),
 * which is not a valid three-way comparison and gives an unspecified
 * ordering when used with qsort().
 */
int
superlu_sort_perm (const void *arg1, const void *arg2)
{
    const int_t *val1 = (const int_t *) arg1;
    const int_t *val2 = (const int_t *) arg2;
    /* Branch-free three-way compare; also avoids overflow that a
       subtraction-based comparator (*val1 - *val2) could incur. */
    return (*val1 > *val2) - (*val1 < *val2);
}
#endif
/* Number of worker threads per MPI process, taken from the
   THREAD_PER_PROCESS environment variable; defaults to 1 when the
   variable is unset. */
int get_thread_per_process()
{
    const char *env = getenv ("THREAD_PER_PROCESS");
    if (env == NULL)
        return 1;
    return atoi (env);
}
/* Whether to offload work to the MIC coprocessor, taken from the
   SUPERLU_MIC_OFFLOAD environment variable; 0 (no offload) when unset. */
int
get_mic_offload ()
{
    const char *env = getenv ("SUPERLU_MIC_OFFLOAD");
    return env ? atoi (env) : 0;
}
int_t
get_max_buffer_size ()
{
char *ttemp;
ttemp = getenv ("MAX_BUFFER_SIZE");
if (ttemp)
return atoi (ttemp);
else
return 5000000;
}
/* cuBLAS blocking factor, taken from the CUBLAS_NB environment
   variable; defaults to 64 when unset. */
int_t
get_cublas_nb ()
{
    const char *env = getenv ("CUBLAS_NB");
    if (env == NULL)
        return 64;
    return atoi (env);
}
/* Number of CUDA streams to use, taken from the NUM_CUDA_STREAMS
   environment variable; defaults to 8 when unset. */
int_t
get_num_cuda_streams ()
{
    const char *env = getenv ("NUM_CUDA_STREAMS");
    return env ? atoi (env) : 8;
}
/*int omp_get_num_threads (void);
int omp_get_thread_num (void);*/
/* Map an MPI rank to a MIC device id by alternating ranks between
   device 1 and device 0 (rank 0 -> 1, rank 1 -> 0, rank 2 -> 1, ...). */
int AssignMic(int my_rank)
{
    int next = my_rank + 1;
    return next % 2;
}
/************************************************************************/
#include "dscatter.c"
/************************************************************************/
/*! \brief
*
* <pre>
* Purpose
* =======
*
* PDGSTRF performs the LU factorization in parallel.
*
* Arguments
* =========
*
* options (input) superlu_options_t*
* The structure defines the input parameters to control
* how the LU decomposition will be performed.
* The following field should be defined:
* o ReplaceTinyPivot (yes_no_t)
* Specifies whether to replace the tiny diagonals by
* sqrt(epsilon)*norm(A) during LU factorization.
*
* m (input) int
* Number of rows in the matrix.
*
* n (input) int
* Number of columns in the matrix.
*
* anorm (input) double
* The norm of the original matrix A, or the scaled A if
* equilibration was done.
*
* LUstruct (input/output) LUstruct_t*
* The data structures to store the distributed L and U factors.
* The following fields should be defined:
*
* o Glu_persist (input) Glu_persist_t*
* Global data structure (xsup, supno) replicated on all processes,
* describing the supernode partition in the factored matrices
* L and U:
* xsup[s] is the leading column of the s-th supernode,
* supno[i] is the supernode number to which column i belongs.
*
* o Llu (input/output) LocalLU_t*
* The distributed data structures to store L and U factors.
* See superlu_ddefs.h for the definition of 'LocalLU_t'.
*
* grid (input) gridinfo_t*
* The 2D process mesh. It contains the MPI communicator, the number
* of process rows (NPROW), the number of process columns (NPCOL),
* and my process rank. It is an input argument to all the
* parallel routines.
* Grid can be initialized by subroutine SUPERLU_GRIDINIT.
* See superlu_ddefs.h for the definition of 'gridinfo_t'.
*
* stat (output) SuperLUStat_t*
* Record the statistics on runtime and floating-point operation count.
* See util.h for the definition of 'SuperLUStat_t'.
*
* info (output) int*
* = 0: successful exit
* < 0: if info = -i, the i-th argument had an illegal value
* > 0: if info = i, U(i,i) is exactly zero. The factorization has
* been completed, but the factor U is exactly singular,
* and division by zero will occur if it is used to solve a
* system of equations.
* </pre>
*/
int_t
pdgstrf(superlu_options_t * options, int m, int n, double anorm,
LUstruct_t * LUstruct, gridinfo_t * grid, SuperLUStat_t * stat, int *info)
{
#ifdef _CRAY
_fcd ftcs = _cptofcd ("N", strlen ("N"));
_fcd ftcs1 = _cptofcd ("L", strlen ("L"));
_fcd ftcs2 = _cptofcd ("N", strlen ("N"));
_fcd ftcs3 = _cptofcd ("U", strlen ("U"));
#endif
double zero = 0.0, alpha = 1.0, beta = 0.0;
int_t *xsup;
int_t *lsub, *lsub1, *usub, *Usub_buf;
int_t **Lsub_buf_2, **Usub_buf_2;
double **Lval_buf_2, **Uval_buf_2; /* pointers to starts of bufs */
double *lusup, *lusup1, *uval, *Uval_buf; /* pointer to current buf */
int_t fnz, i, ib, ijb, ilst, it, iukp, jb, jj, klst, knsupc,
lb, lib, ldv, ljb, lptr, lptr0, lptrj, luptr, luptr0, luptrj,
nlb, nub, nsupc, rel, rukp, il, iu;
int_t Pc, Pr;
int iam, kcol, krow, yourcol, mycol, myrow, pi, pj;
int j, k, lk, nsupers; /* k - current panel to work on */
int k0; /* counter of the next supernode to be factored */
int kk, kk0, kk1, kk2, jj0; /* panels in the look-ahead window */
int iukp0, rukp0, flag0, flag1;
int nsupr, nbrow, segsize;
int msg0, msg2;
int_t **Ufstnz_br_ptr, **Lrowind_bc_ptr;
double **Unzval_br_ptr, **Lnzval_bc_ptr;
int_t *index;
double *nzval;
int_t *iuip, *ruip; /* Pointers to U index/nzval; size ceil(NSUPERS/Pr). */
double *ucol;
int *indirect, *indirect2;
double *tempv, *tempv2d;
int iinfo;
int *ToRecv, *ToSendD, **ToSendR;
Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
LocalLU_t *Llu = LUstruct->Llu;
superlu_scope_t *scp;
float s_eps;
double thresh;
double *tempU2d, *tempu;
int full, ldt, ldu, lead_zero, ncols, ncb, nrb, p, pr, pc, nblocks;
int_t *etree_supno_l, *etree_supno, *blocks, *blockr, *Ublock, *Urows,
*Lblock, *Lrows, *perm_u, *sf_block, *sf_block_l, *nnodes_l,
*nnodes_u, *edag_supno_l, *recvbuf, **edag_supno;
float edag_supno_l_bytes;
#ifdef ISORT
int_t *iperm_u;
#endif
int *msgcnt; /* Count the size of the message xfer'd in each buffer:
* 0 : transferred in Lsub_buf[]
* 1 : transferred in Lval_buf[]
* 2 : transferred in Usub_buf[]
* 3 : transferred in Uval_buf[]
*/
int **msgcnts, **msgcntsU;
int *factored, *factoredU, nnodes, *sendcnts, *sdispls, *recvcnts,
*rdispls, *srows, *rrows;
etree_node *head, *tail, *ptr;
int *num_child;
int num_look_aheads, look_id, *look_ahead;
int_t *perm_c_supno, *iperm_c_supno;
MPI_Request *recv_req, **recv_reqs, **send_reqs, **send_reqs_u,
**recv_reqs_u;
MPI_Request *send_req, *U_diag_blk_send_req = NULL;
MPI_Status status;
void *attr_val;
int flag;
int iword = sizeof (int_t);
int dword = sizeof (double);
double scatter_timer = 0;
double gemm_timer = 0;
/* For measuring load imbalence in omp threads*/
double omp_load_imblc = 0.0;
double *omp_loop_time;
double CPUOffloadTimer = 0;
double CPUOffloadFlop = 0;
double CPUOffloadMop = 0;
double schur_flop_timer = 0.0;
double pdgstrf2_timer = 0.0;
double pdgstrs2_timer = 0.0;
double lookaheadupdatetimer = 0.0;
#if !defined( GPU_ACC )
/* Counter for couting memory operations */
double scatter_mem_op_counter = 0.0;
double scatter_mem_op_timer = 0.0;
double scatterL_mem_op_counter = 0.0;
double scatterL_mem_op_timer = 0.0;
double scatterU_mem_op_counter = 0.0;
double scatterU_mem_op_timer = 0.0;
double LookAheadRowSepTimer = 0.0;
double LookAheadRowSepMOP = 0.0;
double GatherTimer = 0.0;
double GatherMOP = 0.0;
double LookAheadGEMMTimer = 0.0;
double LookAheadGEMMFlOp = 0.0;
double LookAheadScatterTimer = 0.0;
double LookAheadScatterMOP = 0.0;
double schur_flop_counter = 0.0;
#endif
#if ( DEBUGlevel>=2 )
int_t num_copy = 0, num_update = 0;
#endif
#if ( PRNTlevel==3 )
int zero_msg = 0, total_msg = 0;
#endif
#if ( PROFlevel>=1 )
double t1, t2;
float msg_vol = 0, msg_cnt = 0;
#endif
/* Test the input parameters. */
*info = 0;
if (m < 0)
*info = -2;
else if (n < 0)
*info = -3;
if (*info) {
pxerr_dist ("pdgstrf", grid, -*info);
return (-1);
}
/* Quick return if possible. */
if (m == 0 || n == 0) return 0;
/*
* Initialization.
*/
iam = grid->iam;
Pc = grid->npcol;
Pr = grid->nprow;
myrow = MYROW (iam, grid);
mycol = MYCOL (iam, grid);
nsupers = Glu_persist->supno[n - 1] + 1;
xsup = Glu_persist->xsup;
s_eps = smach_dist("Epsilon");
thresh = s_eps * anorm;
MPI_Attr_get (MPI_COMM_WORLD, MPI_TAG_UB, &attr_val, &flag);
if (!flag) {
fprintf (stderr, "Could not get TAG_UB\n");
return (-1);
}
int tag_ub = *(int *) attr_val;
#if ( PRNTlevel>=1 )
if (!iam)
printf ("MPI tag upper bound = %d\n", tag_ub);
#endif
#if ( DEBUGlevel>=1 )
if (s_eps == 0.0)
printf (" ***** warning s_eps = %e *****\n", s_eps);
CHECK_MALLOC (iam, "Enter pdgstrf()");
#endif
stat->ops[FACT] = 0.0;
stat->current_buffer = 0.0;
stat->peak_buffer = 0.0;
stat->gpu_buffer = 0.0;
/* make sure the range of look-ahead window [0, MAX_LOOKAHEADS-1] */
num_look_aheads = SUPERLU_MAX(0, SUPERLU_MIN(options->num_lookaheads, MAX_LOOKAHEADS - 1));
if (Pr * Pc > 1) {
if (!(U_diag_blk_send_req =
(MPI_Request *) SUPERLU_MALLOC (Pr * sizeof (MPI_Request))))
ABORT ("Malloc fails for U_diag_blk_send_req[].");
/* flag no outstanding Isend */
U_diag_blk_send_req[myrow] = MPI_REQUEST_NULL; /* used 0 before */
/* allocating buffers for look-ahead */
i = Llu->bufmax[0];
if (i != 0) {
if ( !(Llu->Lsub_buf_2[0] = intMalloc_dist ((num_look_aheads + 1) * ((size_t) i))) )
ABORT ("Malloc fails for Lsub_buf.");
for (jj = 0; jj < num_look_aheads; jj++)
Llu->Lsub_buf_2[jj + 1] = Llu->Lsub_buf_2[jj] + i;
}
i = Llu->bufmax[1];
if (i != 0) {
if (!(Llu->Lval_buf_2[0] = doubleMalloc_dist ((num_look_aheads + 1) * ((size_t) i))))
ABORT ("Malloc fails for Lval_buf[].");
for (jj = 0; jj < num_look_aheads; jj++)
Llu->Lval_buf_2[jj + 1] = Llu->Lval_buf_2[jj] + i;
}
i = Llu->bufmax[2];
if (i != 0) {
if (!(Llu->Usub_buf_2[0] = intMalloc_dist ((num_look_aheads + 1) * i)))
ABORT ("Malloc fails for Usub_buf_2[].");
for (jj = 0; jj < num_look_aheads; jj++)
Llu->Usub_buf_2[jj + 1] = Llu->Usub_buf_2[jj] + i;
}
i = Llu->bufmax[3];
if (i != 0) {
if (!(Llu->Uval_buf_2[0] = doubleMalloc_dist ((num_look_aheads + 1) * i)))
ABORT ("Malloc fails for Uval_buf_2[].");
for (jj = 0; jj < num_look_aheads; jj++)
Llu->Uval_buf_2[jj + 1] = Llu->Uval_buf_2[jj] + i;
}
}
log_memory( (Llu->bufmax[0] + Llu->bufmax[2]) * (num_look_aheads + 1)
* iword +
(Llu->bufmax[1] + Llu->bufmax[3]) * (num_look_aheads + 1)
* dword, stat );
/* creating pointers to the look-ahead buffers */
if (! (Lsub_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int_t *))))
ABORT ("Malloc fails for Lsub_buf_2[].");
if (! (Lval_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (double *))))
ABORT ("Malloc fails for Lval_buf_2[].");
if (! (Usub_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int_t *))))
ABORT ("Malloc fails for Uval_buf_2[].");
if (! (Uval_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (double *))))
ABORT ("Malloc fails for buf_2[].");
for (i = 0; i <= num_look_aheads; i++) {
Lval_buf_2[i] = Llu->Lval_buf_2[i];
Lsub_buf_2[i] = Llu->Lsub_buf_2[i];
Uval_buf_2[i] = Llu->Uval_buf_2[i];
Usub_buf_2[i] = Llu->Usub_buf_2[i];
}
if (!(msgcnts = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int *))))
ABORT ("Malloc fails for msgcnts[].");
if (!(msgcntsU = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int *))))
ABORT ("Malloc fails for msgcntsU[].");
for (i = 0; i <= num_look_aheads; i++) {
if (!(msgcnts[i] = SUPERLU_MALLOC (4 * sizeof (int))))
ABORT ("Malloc fails for msgcnts[].");
if (!(msgcntsU[i] = SUPERLU_MALLOC (4 * sizeof (int))))
ABORT ("Malloc fails for msgcntsU[].");
}
if (! (recv_reqs_u = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *))))
ABORT ("Malloc fails for recv_reqs_u[].");
if (! (send_reqs_u = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *))))
ABORT ("Malloc fails for send_reqs_u[].");
if (! (send_reqs = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *))))
ABORT ("Malloc fails for send_reqs_u[].");
if (! (recv_reqs = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *))))
ABORT ("Malloc fails for recv_reqs[].");
for (i = 0; i <= num_look_aheads; i++) {
if (!(recv_reqs_u[i] = (MPI_Request *) SUPERLU_MALLOC (2 * sizeof (MPI_Request))))
ABORT ("Malloc fails for recv_req_u[i].");
if (!(send_reqs_u[i] = (MPI_Request *) SUPERLU_MALLOC (2 * Pr * sizeof (MPI_Request))))
ABORT ("Malloc fails for send_req_u[i].");
if (!(send_reqs[i] = (MPI_Request *) SUPERLU_MALLOC (2 * Pc * sizeof (MPI_Request))))
ABORT ("Malloc fails for send_reqs[i].");
if (!(recv_reqs[i] = (MPI_Request *) SUPERLU_MALLOC (4 * sizeof (MPI_Request))))
ABORT ("Malloc fails for recv_req[].");
send_reqs[i][0] = send_reqs[i][1] = MPI_REQUEST_NULL;
recv_reqs[i][0] = recv_reqs[i][1] = MPI_REQUEST_NULL;
}
if (!(factored = SUPERLU_MALLOC (nsupers * sizeof (int_t))))
ABORT ("Malloc fails for factored[].");
if (!(factoredU = SUPERLU_MALLOC (nsupers * sizeof (int_t))))
ABORT ("Malloc fails for factoredU[].");
for (i = 0; i < nsupers; i++) factored[i] = factoredU[i] = -1;
log_memory(2 * nsupers * iword, stat);
int num_threads = 1;
#ifdef _OPENMP
#pragma omp parallel default(shared)
{
if (omp_get_thread_num () == 0) {
num_threads = omp_get_num_threads ();
}
}
#endif
#if 0
omp_loop_time = (double *) _mm_malloc (sizeof (double) * num_threads,64);
#else
omp_loop_time = (double *) doubleMalloc_dist(num_threads);
#endif
#if ( PRNTlevel>=1 )
if(!iam) printf(".. Starting with %d OpenMP threads \n", num_threads );
double tt1 = SuperLU_timer_ ();
#endif
nblocks = 0;
ncb = nsupers / Pc;
nrb = nsupers / Pr;
int nstreams = get_num_cuda_streams ();
/* int nstreams = NUM_CUDA_STREAMS; */
/* in order to have dynamic scheduling */
int *full_u_cols;
int *blk_ldu;
#if 0
full_u_cols = (int_t *) _mm_malloc (sizeof (int_t) * ncb,64);
blk_ldu = (int_t *) _mm_malloc (sizeof (int_t) * ncb,64);
#else
full_u_cols = SUPERLU_MALLOC(ncb * sizeof(int));
blk_ldu = SUPERLU_MALLOC(ncb * sizeof(int));
#endif
log_memory(2 * ncb * iword, stat);
/* array holding last column blk for each partition,
used in SchCompUdt--CUDA.c */
#if 0
int *stream_end_col = (int_t *) _mm_malloc (sizeof (int_t) * nstreams,64);
#else
int *stream_end_col = SUPERLU_MALLOC( nstreams * sizeof(int) );
#endif
/* insert a check condition here */
#if 0 /* Sherry: not used? */
/* This bunch is used for static scheduling */
pair *full_col_count = (pair *) _mm_malloc (sizeof (pair) * ncb,64);
int_t *count_cols, *sum_cols, *partition;
count_cols = (int_t *) _mm_malloc (sizeof (int_t) * num_threads,64);
sum_cols = (int_t *) _mm_malloc (sizeof (int_t) * num_threads,64);
partition = (int_t *) _mm_malloc (sizeof (int_t) * num_threads * ncb,64);
int_t ldp = ncb;
#endif
/* ##################################################################
* Compute a good static schedule based on the factorization task graph.
* ################################################################## */
perm_c_supno = SUPERLU_MALLOC (2 * nsupers * sizeof (int_t));
iperm_c_supno = perm_c_supno + nsupers;
static_schedule(options, m, n, LUstruct, grid, stat,
perm_c_supno, iperm_c_supno, info);
#if ( DEBUGlevel >= 2 )
PrintInt10("schedule:perm_c_supno", nsupers, perm_c_supno);
printf("[%d] .. Turn off static schedule for debugging ..\n", iam);
/* Turn off static schedule */
for (i = 0; i < nsupers; ++i) perm_c_supno[i] = iperm_c_supno[i] = i;
#endif
/* ################################################################## */
/* constructing look-ahead table to indicate the last dependency */
int *look_ahead_l;
stat->num_look_aheads = num_look_aheads;
look_ahead_l = SUPERLU_MALLOC (nsupers * sizeof (int));
look_ahead = SUPERLU_MALLOC (nsupers * sizeof (int));
for (lb = 0; lb < nsupers; lb++) look_ahead_l[lb] = -1;
log_memory(3 * nsupers * iword, stat);
/* go through U-factor */
for (lb = 0; lb < nrb; ++lb) {
ib = lb * Pr + myrow;
index = Llu->Ufstnz_br_ptr[lb];
if (index) { /* Not an empty row */
k = BR_HEADER;
for (j = 0; j < index[0]; ++j) {
jb = index[k];
if (jb != ib)
look_ahead_l[jb] =
SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]);
k += UB_DESCRIPTOR + SuperSize (index[k]);
}
}
}
if (myrow < nsupers % grid->nprow) {
ib = nrb * Pr + myrow;
index = Llu->Ufstnz_br_ptr[nrb];
if (index) { /* Not an empty row */
k = BR_HEADER;
for (j = 0; j < index[0]; ++j) {
jb = index[k];
if (jb != ib)
look_ahead_l[jb] =
SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]);
k += UB_DESCRIPTOR + SuperSize (index[k]);
}
}
}
if (options->SymPattern == NO) {
/* go through L-factor */
for (lb = 0; lb < ncb; lb++) {
ib = lb * Pc + mycol;
index = Llu->Lrowind_bc_ptr[lb];
if (index) {
k = BC_HEADER;
for (j = 0; j < index[0]; j++) {
jb = index[k];
if (jb != ib)
look_ahead_l[jb] =
SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]);
k += LB_DESCRIPTOR + index[k + 1];
}
}
}
if (mycol < nsupers % grid->npcol) {
ib = ncb * Pc + mycol;
index = Llu->Lrowind_bc_ptr[ncb];
if (index) {
k = BC_HEADER;
for (j = 0; j < index[0]; j++) {
jb = index[k];
if (jb != ib)
look_ahead_l[jb] =
SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]);
k += LB_DESCRIPTOR + index[k + 1];
}
}
}
}
MPI_Allreduce (look_ahead_l, look_ahead, nsupers, MPI_INT, MPI_MAX, grid->comm);
SUPERLU_FREE (look_ahead_l);
#ifdef ISORT
iperm_u = SUPERLU_MALLOC (nsupers * sizeof (int_t));
perm_u = SUPERLU_MALLOC (nsupers * sizeof (int_t));
#else
perm_u = SUPERLU_MALLOC (2 * nsupers * sizeof (int_t));
#endif
log_memory(nsupers * iword, stat);
#if ( PRNTlevel>=1 )
if (grid->iam == 0)
printf (" * init: %e seconds\n", SuperLU_timer_ () - tt1);
#endif
k = sp_ienv_dist (3); /* max supernode size */
#if 0
if ( !(Llu->ujrow = doubleMalloc_dist(k*(k+1)/2)) )
ABORT("Malloc fails for ujrow[].");
#else
/* Instead of half storage, we'll do full storage */
if (!(Llu->ujrow = doubleMalloc_dist (k * k)))
ABORT ("Malloc fails for ujrow[].");
log_memory(k * k * iword, stat);
#endif
#if ( PRNTlevel>=1 )
if (!iam) {
printf (".. thresh = s_eps %e * anorm %e = %e\n", s_eps, anorm,
thresh);
printf
(".. Buffer size: Lsub %ld\tLval %ld\tUsub %ld\tUval %ld\tLDA %ld\n",
(long int) Llu->bufmax[0], (long int) Llu->bufmax[1],
(long int) Llu->bufmax[2], (long int) Llu->bufmax[3],
(long int) Llu->bufmax[4]);
}
#endif
Lrowind_bc_ptr = Llu->Lrowind_bc_ptr;
Lnzval_bc_ptr = Llu->Lnzval_bc_ptr;
Ufstnz_br_ptr = Llu->Ufstnz_br_ptr;
Unzval_br_ptr = Llu->Unzval_br_ptr;
ToRecv = Llu->ToRecv;
ToSendD = Llu->ToSendD;
ToSendR = Llu->ToSendR;
ldt = sp_ienv_dist (3); /* Size of maximum supernode */
k = CEILING (nsupers, Pr); /* Number of local block rows */
/* Following circuit is for finding maximum block size */
int local_max_row_size = 0;
int max_row_size;
for (int i = 0; i < nsupers; ++i) {
int tpc = PCOL (i, grid);
if (mycol == tpc) {
lk = LBj (i, grid);
lsub = Lrowind_bc_ptr[lk];
if (lsub != NULL) {
local_max_row_size = SUPERLU_MAX (local_max_row_size, lsub[1]);
}
}
}
/* Max row size is global reduction of within A row */
MPI_Allreduce (&local_max_row_size, &max_row_size, 1, MPI_INT, MPI_MAX, (grid->rscp.comm));
/* Buffer size is max of of look ahead window */
/* int_t buffer_size =
SUPERLU_MAX (max_row_size * num_threads * ldt,
get_max_buffer_size ()); */
int cublas_nb = get_cublas_nb();
#ifdef GPU_ACC
int buffer_size = SUPERLU_MAX(max_row_size*nstreams*cublas_nb,get_max_buffer_size());
#else
int Threads_per_process = get_thread_per_process();
int buffer_size = SUPERLU_MAX(max_row_size*Threads_per_process*ldt,get_max_buffer_size());
#endif
/* symmetric assumption */
/* Note that in following expression 8 can be anything
as long as its not too big */
int bigu_size = 8 * sp_ienv_dist (3) * (max_row_size);
#if ( PRNTlevel>=1 )
if(!iam) printf("[%d] .. BIG U size %d \n", iam, bigu_size);
#endif
#ifdef GPU_ACC
// printf("hello 1\n");
double* bigU;
if ( checkCuda(cudaHostAlloc((void**)&bigU, bigu_size * sizeof(double), cudaHostAllocDefault)) )
ABORT("Malloc fails for dgemm buffer U ");
int bigv_size = buffer_size;
#if ( PRNTlevel>=1 )
if (!iam) printf("[%d] .. BIG V size %d\n", iam, bigv_size);
#endif
double* bigV;
if ( checkCuda(cudaHostAlloc((void**)&bigV, bigv_size * sizeof(double) ,cudaHostAllocDefault)) )
ABORT("Malloc fails for dgemm buffer V");
DisplayHeader();
#if ( PRNTlevel>=1 )
printf(" Starting with %d Cuda Streams \n",nstreams );
#endif
cublasHandle_t *handle;
handle = (cublasHandle_t *) SUPERLU_MALLOC(sizeof(cublasHandle_t)*nstreams);
for(int i = 0; i < nstreams; i++) handle[i] = create_handle();
// creating streams
cudaStream_t *streams;
streams = (cudaStream_t *) SUPERLU_MALLOC(sizeof(cudaStream_t)*nstreams);
for (int i = 0; i < nstreams; ++i)
checkCuda( cudaStreamCreate(&streams[i]) );
// allocating data in device
double *dA, *dB, *dC;
cudaError_t cudaStat;
#if 0
// cudaStat = cudaMalloc( (void**)&dA, m*k*sizeof(double));
// HOw much should be the size of dA?
// for time being just making it
// cudaStat = cudaMalloc( (void**)&dA, ((max_row_size*sp_ienv_dist(3)))* sizeof(double));
#endif
cudaStat = cudaMalloc( (void**)&dA, max_row_size*sp_ienv_dist(3)* sizeof(double));
if (cudaStat!= cudaSuccess) {
fprintf(stderr, "!!!! Error in allocating A in the device %ld \n",m*k*sizeof(double) );
return 1;
}
// size of B should be max_supernode_size*buffer
cudaStat = cudaMalloc((void**)&dB, bigu_size * sizeof(double));
if (cudaStat!= cudaSuccess) {
fprintf(stderr, "!!!! Error in allocating B in the device %ld \n",n*k*sizeof(double));
return 1;
}
cudaStat = cudaMalloc((void**)&dC, buffer_size* sizeof(double) );
if (cudaStat!= cudaSuccess) {
fprintf(stderr, "!!!! Error in allocating C in the device \n" );
return 1;
}
stat->gpu_buffer += ( max_row_size * sp_ienv_dist(3)
+ bigu_size + buffer_size ) * dword;
#else /* not CUDA */
double* bigU;
if ( !(bigU = doubleMalloc_dist(bigu_size)) )
ABORT ("Malloc fails for dgemm u buff U");
//Maximum size of of bigU= sqrt(buffsize) ?
int bigv_size = 8 * ldt * ldt * num_threads;
#if ( PRNTlevel>=1 )
if (!iam) printf("[%d] .. BIG V size %d\n", iam, bigv_size);
#endif
double *bigV;
if ( !(bigV = doubleMalloc_dist(bigv_size)) )
ABORT ("Malloc failed for dgemm buffer V");
#endif
log_memory((bigv_size + bigu_size) * dword, stat);
// mlock(bigU,(bigu_size) * sizeof (double));
#if ( PRNTlevel>=1 )
if(!iam) {
printf (" Max row size is %d \n", max_row_size);
printf (" Using buffer_size of %d \n", buffer_size);
printf (" Threads per process %d \n", num_threads);
}
#endif
if (!(tempv2d = doubleCalloc_dist (2 * ((size_t) ldt) * ldt)))
ABORT ("Calloc fails for tempv2d[].");
tempU2d = tempv2d + ldt * ldt;
if (!(indirect = SUPERLU_MALLOC (ldt * num_threads * sizeof(int))))
ABORT ("Malloc fails for indirect[].");
if (!(indirect2 = SUPERLU_MALLOC (ldt * num_threads * sizeof(int))))
ABORT ("Malloc fails for indirect[].");
if (!(iuip = intMalloc_dist (k))) ABORT ("Malloc fails for iuip[].");
if (!(ruip = intMalloc_dist (k))) ABORT ("Malloc fails for ruip[].");
log_memory(2 * ldt *ldt * dword + 2 * ldt * num_threads * iword
+ 2 * k * iword, stat);
int_t *lookAheadFullRow,*lookAheadStRow,*lookAhead_lptr,*lookAhead_ib,
*RemainFullRow,*RemainStRow,*Remain_lptr,*Remain_ib;
lookAheadFullRow = intMalloc_dist( (num_look_aheads+1) );
lookAheadStRow = intMalloc_dist( (num_look_aheads+1) );
lookAhead_lptr = intMalloc_dist( (num_look_aheads+1) );
lookAhead_ib = intMalloc_dist( (num_look_aheads+1) );
int_t mrb= (nsupers+Pr-1) / Pr;
int_t mcb= (nsupers+Pc-1) / Pc;
RemainFullRow = intMalloc_dist(mrb);
RemainStRow = intMalloc_dist(mrb);
#if 0
Remain_lptr = (int *) _mm_malloc(sizeof(int)*mrb,1);
#else
Remain_lptr = intMalloc_dist(mrb);
#endif
// mlock(Remain_lptr, sizeof(int)*mrb );
Remain_ib = intMalloc_dist(mrb);
Remain_info_t *Remain_info;
#if 0
Remain_info = (Remain_info_t *) _mm_malloc(mrb*sizeof(Remain_info_t),64);
#else
Remain_info = (Remain_info_t *) SUPERLU_MALLOC(mrb*sizeof(Remain_info_t));
#endif
log_memory(4 * mrb * iword + mrb * sizeof(Remain_info_t), stat);
double *lookAhead_L_buff, *Remain_L_buff;
Ublock_info_t *Ublock_info;
ldt = sp_ienv_dist (3); /* max supernode size */
lookAhead_L_buff = doubleMalloc_dist(ldt*ldt* (num_look_aheads+1) );
log_memory(ldt * ldt * (num_look_aheads+1) * dword, stat);
#if 0
Remain_L_buff = (double *) _mm_malloc( sizeof(double)*(Llu->bufmax[1]),64);
Ublock_info = (Ublock_info_t *) _mm_malloc(mcb*sizeof(Ublock_info_t),64);
int * Ublock_info_iukp = (int *) _mm_malloc(mcb*sizeof(int),64);
int * Ublock_info_rukp = (int *) _mm_malloc(mcb*sizeof(int),64);
int * Ublock_info_jb = (int *) _mm_malloc(mcb*sizeof(int),64);
#else
Remain_L_buff = doubleMalloc_dist(Llu->bufmax[1]);
Ublock_info = (Ublock_info_t *) SUPERLU_MALLOC(mcb*sizeof(Ublock_info_t));
int *Ublock_info_iukp = (int *) SUPERLU_MALLOC(mcb*sizeof(int));
int *Ublock_info_rukp = (int *) SUPERLU_MALLOC(mcb*sizeof(int));
int *Ublock_info_jb = (int *) SUPERLU_MALLOC(mcb*sizeof(int));
#endif
log_memory(Llu->bufmax[1] * dword, stat);
double NetSchurUpTimer = 0;
double pdgstrfTimer= SuperLU_timer_();
/* ##################################################################
** Handle first block column separately to start the pipeline. **
################################################################## */
look_id = 0;
msgcnt = msgcnts[0];
send_req = send_reqs[0];
recv_req = recv_reqs[0];
k0 = 0;
k = perm_c_supno[0];
kcol = PCOL (k, grid);
krow = PROW (k, grid);
if (mycol == kcol) {
double ttt1 = SuperLU_timer_();
/* panel factorization */
PDGSTRF2 (options, k0, k, thresh, Glu_persist, grid, Llu,
U_diag_blk_send_req, tag_ub, stat, info);
pdgstrf2_timer += SuperLU_timer_()-ttt1;
scp = &grid->rscp; /* The scope of process row. */
/* Multicasts numeric values of L(:,0) to process rows. */
lk = LBj (k, grid); /* Local block number. */
lsub = Lrowind_bc_ptr[lk];
lusup = Lnzval_bc_ptr[lk];
if (lsub) {
msgcnt[0] = lsub[1] + BC_HEADER + lsub[0] * LB_DESCRIPTOR;
msgcnt[1] = lsub[1] * SuperSize (k);
} else {
msgcnt[0] = msgcnt[1] = 0;
}
for (pj = 0; pj < Pc; ++pj) {
if (ToSendR[lk][pj] != EMPTY) {
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Isend (lsub, msgcnt[0], mpi_int_t, pj, SLU_MPI_TAG (0, 0) /* 0 */ ,
scp->comm, &send_req[pj]);
MPI_Isend (lusup, msgcnt[1], MPI_DOUBLE, pj, SLU_MPI_TAG (1, 0) /* 1 */ ,
scp->comm, &send_req[pj + Pc]);
#if ( DEBUGlevel>=2 )
printf ("[%d] first block cloumn Send L(:,%4d): lsub %4d, lusup %4d to Pc %2d\n",
iam, 0, msgcnt[0], msgcnt[1], pj);
#endif
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
msg_cnt += 2;
msg_vol += msgcnt[0] * iword + msgcnt[1] * dword;
#endif
} /* end if */
} /* end for pj ... */
} else { /* Post immediate receives. */
if (ToRecv[k] >= 1) { /* Recv block column L(:,0). */
scp = &grid->rscp; /* The scope of process row. */
MPI_Irecv (Lsub_buf_2[0], Llu->bufmax[0], mpi_int_t, kcol,
SLU_MPI_TAG (0, 0) /* 0 */ ,
scp->comm, &recv_req[0]);
MPI_Irecv (Lval_buf_2[0], Llu->bufmax[1], MPI_DOUBLE, kcol,
SLU_MPI_TAG (1, 0) /* 1 */ ,
scp->comm, &recv_req[1]);
}
} /* end if mycol == 0 */
factored[k] = 0; /* flag column k as factored. */
/* post receive of first U-row */
if (myrow != krow) {
if (ToRecv[k] == 2) { /* Recv block row U(k,:). */
scp = &grid->cscp; /* The scope of process column. */
Usub_buf = Llu->Usub_buf_2[0];
Uval_buf = Llu->Uval_buf_2[0];
MPI_Irecv (Usub_buf, Llu->bufmax[2], mpi_int_t, krow,
SLU_MPI_TAG (2, 0) /* 2%tag_ub */ ,
scp->comm, &recv_reqs_u[0][0]);
MPI_Irecv (Uval_buf, Llu->bufmax[3], MPI_DOUBLE, krow,
SLU_MPI_TAG (3, 0) /* 3%tag_ub */ ,
scp->comm, &recv_reqs_u[0][1]);
}
}
/* ##################################################################
**** MAIN LOOP ****
################################################################## */
for (k0 = 0; k0 < nsupers; ++k0) {
k = perm_c_supno[k0];
/* ============================================ *
* ======== look-ahead the new columns ======== *
* ============================================ */
/* tt1 = SuperLU_timer_(); */
if (k0 == 0) { /* look-ahead all the columns in the window */
kk1 = k0 + 1;
kk2 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1);
} else { /* look-ahead one new column after the current window */
kk1 = k0 + num_look_aheads;
kk2 = SUPERLU_MIN (kk1, nsupers - 1);
}
for (kk0 = kk1; kk0 <= kk2; kk0++) {
/* loop through look-ahead window */
kk = perm_c_supno[kk0]; /* use the ordering from static schedule */
look_id = kk0 % (1 + num_look_aheads); /* which column in window */
if (look_ahead[kk] < k0) { /* does not depend on current column */
kcol = PCOL (kk, grid);
if (mycol == kcol) {
/* Panel factorization -- Factor diagonal and subdiagonal
L blocks and test for exact singularity. */
factored[kk] = 0; /* flag column kk as factored */
double ttt1 = SuperLU_timer_();
PDGSTRF2 (options, kk0, kk, thresh, Glu_persist,
grid, Llu, U_diag_blk_send_req, tag_ub, stat, info);
pdgstrf2_timer += SuperLU_timer_() - ttt1;
/* Multicasts numeric values of L(:,kk) to process rows. */
/* ttt1 = SuperLU_timer_(); */
msgcnt = msgcnts[look_id]; /* point to the proper count array */
send_req = send_reqs[look_id];
lk = LBj (kk, grid); /* Local block number. */
lsub1 = Lrowind_bc_ptr[lk];
if (lsub1) {
msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0] * LB_DESCRIPTOR;
msgcnt[1] = lsub1[1] * SuperSize (kk);
} else {
msgcnt[0] = 0;
msgcnt[1] = 0;
}
scp = &grid->rscp; /* The scope of process row. */
for (pj = 0; pj < Pc; ++pj) {
if (ToSendR[lk][pj] != EMPTY) {
lusup1 = Lnzval_bc_ptr[lk];
MPI_Isend (lsub1, msgcnt[0], mpi_int_t, pj,
SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */
scp->comm, &send_req[pj]);
MPI_Isend (lusup1, msgcnt[1], MPI_DOUBLE, pj,
SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */
scp->comm, &send_req[pj + Pc]);
#if ( DEBUGlevel>=2 )
printf ("[%d] -1- Send L(:,%4d): #lsub1 %4d, #lusup1 %4d right to Pj %2d\n",
iam, kk, msgcnt[0], msgcnt[1], pj);
#endif
}
}
/* stat->time9 += SuperLU_timer_() - ttt1; */
} else { /* Post Recv of block column L(:,kk). */
/* double ttt1 = SuperLU_timer_(); */
if (ToRecv[kk] >= 1) {
scp = &grid->rscp; /* The scope of process row. */
recv_req = recv_reqs[look_id];
MPI_Irecv (Lsub_buf_2[look_id], Llu->bufmax[0],
mpi_int_t, kcol, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */
scp->comm, &recv_req[0]);
MPI_Irecv (Lval_buf_2[look_id], Llu->bufmax[1],
MPI_DOUBLE, kcol,
SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */
scp->comm, &recv_req[1]);
}
/* stat->time10 += SuperLU_timer_() - ttt1; */
} /* end if mycol == Pc(kk) */
} /* end if look-ahead */
/* post irecv for U-row look-ahead */
krow = PROW (kk, grid);
if (myrow != krow) {
if (ToRecv[kk] == 2) { /* post iRecv block row U(k,:). */
scp = &grid->cscp; /* The scope of process column. */
Usub_buf = Llu->Usub_buf_2[look_id];
Uval_buf = Llu->Uval_buf_2[look_id];
MPI_Irecv (Usub_buf, Llu->bufmax[2], mpi_int_t, krow,
SLU_MPI_TAG (2, kk0) /* (4*kk0+2)%tag_ub */ ,
scp->comm, &recv_reqs_u[look_id][0]);
MPI_Irecv (Uval_buf, Llu->bufmax[3], MPI_DOUBLE, krow,
SLU_MPI_TAG (3, kk0) /* (4*kk0+3)%tag_ub */ ,
scp->comm, &recv_reqs_u[look_id][1]);
}
}
} /* end for each column in look-ahead window */
/* stat->time4 += SuperLU_timer_()-tt1; */
/* ================================= *
* == looking-ahead the U columns == *
* ================================= */
kk1 = k0;
kk2 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1);
for (kk0 = kk1; kk0 < kk2; kk0++) {
kk = perm_c_supno[kk0];
if (factoredU[kk0] != 1 && look_ahead[kk] < k0) {
kcol = PCOL (kk, grid);
krow = PROW (kk, grid);
lk = LBj (kk, grid); /* Local block number. */
look_id = kk0 % (1 + num_look_aheads);
msgcnt = msgcntsU[look_id];
recv_req = recv_reqs[look_id];
/* ================================================= *
* checking if diagonal block has been received *
* for panel factorization of U in look-ahead window *
* ================================================= */
if (mycol == kcol) {
flag0 = flag1 = 1;
msgcnt[0] = msgcnt[1] = -1;
} else {
flag0 = flag1 = 0;
if (ToRecv[kk] >= 1) {
if (recv_req[0] != MPI_REQUEST_NULL) {
MPI_Test (&recv_req[0], &flag0, &status);
if (flag0) {
MPI_Get_count (&status, mpi_int_t, &msgcnt[0]);
recv_req[0] = MPI_REQUEST_NULL;
}
} else flag0 = 1;
if (recv_req[1] != MPI_REQUEST_NULL) {
MPI_Test (&recv_req[1], &flag1, &status);
if (flag1) {
MPI_Get_count (&status, mpi_int_t, &msgcnt[1]);
recv_req[1] = MPI_REQUEST_NULL;
}
} else flag1 = 1;
} else msgcnt[0] = 0;
}
if (flag0 && flag1) {
/* tt1 = SuperLU_timer_(); */
scp = &grid->cscp; /* The scope of process column. */
if (myrow == krow) {
factoredU[kk0] = 1;
/* Parallel triangular solve across process row *krow* --
U(k,j) = L(k,k) \ A(k,j). */
/* double ttt2 = SuperLU_timer_(); */
double ttt2 = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp parallel
#endif
{
PDGSTRS2 (kk0, kk, Glu_persist, grid, Llu,
stat);
}
pdgstrs2_timer += SuperLU_timer_()-ttt2;
/* stat->time8 += SuperLU_timer_()-ttt2; */
/* Multicasts U(k,:) to process columns. */
lk = LBi (kk, grid);
usub = Ufstnz_br_ptr[lk];
uval = Unzval_br_ptr[lk];
if (usub) {
msgcnt[2] = usub[2];
msgcnt[3] = usub[1];
} else {
msgcnt[2] = msgcnt[3] = 0;
}
if (ToSendD[lk] == YES) {
for (pi = 0; pi < Pr; ++pi) {
if (pi != myrow) {
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Isend (usub, msgcnt[2], mpi_int_t, pi,
SLU_MPI_TAG (2, kk0), /* (4*kk0+2)%tag_ub */
scp->comm, &send_reqs_u[look_id][pi]);
MPI_Isend (uval, msgcnt[3], MPI_DOUBLE,
pi, SLU_MPI_TAG (3, kk0), /* (4*kk0+3)%tag_ub */
scp->comm, &send_reqs_u[look_id][pi + Pr]);
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
msg_cnt += 2;
msg_vol += msgcnt[2] * iword + msgcnt[3] * dword;
#endif
#if ( DEBUGlevel>=2 )
printf ("[%d] Send U(%4d,:) to Pr %2d\n",
iam, k, pi);
#endif
} /* if pi ... */
} /* for pi ... */
} /* if ToSendD ... */
/* stat->time2 += SuperLU_timer_()-tt1; */
} /* end if myrow == krow */
} /* end if flag0 ... */
} /* end if factoredU[] ... */
} /* end for kk0 ... */
/* ============================================== *
* == start processing the current row of U == *
* ============================================== */
knsupc = SuperSize (k);
krow = PROW (k, grid);
kcol = PCOL (k, grid);
/* tt1 = SuperLU_timer_(); */
look_id = k0 % (1 + num_look_aheads);
recv_req = recv_reqs[look_id];
send_req = send_reqs[look_id];
msgcnt = msgcnts[look_id];
Usub_buf = Llu->Usub_buf_2[look_id];
Uval_buf = Llu->Uval_buf_2[look_id];
if (mycol == kcol) {
lk = LBj (k, grid); /* Local block number. */
for (pj = 0; pj < Pc; ++pj) {
/* Wait for Isend to complete before using lsub/lusup. */
if (ToSendR[lk][pj] != EMPTY) {
MPI_Wait (&send_req[pj], &status);
MPI_Wait (&send_req[pj + Pc], &status);
}
}
lsub = Lrowind_bc_ptr[lk];
lusup = Lnzval_bc_ptr[lk];
} else {
if (ToRecv[k] >= 1) { /* Recv block column L(:,k). */
scp = &grid->rscp; /* The scope of process row. */
/* ============================================ *
* waiting for L(:,kk) for outer-product update *
* if iam in U(kk,:) then *
* the diagonal block did not reach in time *
* for panel factorization of U(k,:) *
* ============================================ */
#if ( PROFlevel>=1 )
TIC (t1);
#endif
if (recv_req[0] != MPI_REQUEST_NULL) {
MPI_Wait (&recv_req[0], &status);
MPI_Get_count (&status, mpi_int_t, &msgcnt[0]);
recv_req[0] = MPI_REQUEST_NULL;
} else {
msgcnt[0] = msgcntsU[look_id][0];
#if (DEBUGlevel>=2)
printf("\t[%d] k=%d, look_id=%d, recv_req[0] == MPI_REQUEST_NULL, msgcnt[0] = %d\n",
iam, k, look_id, msgcnt[0]);
#endif
}
if (recv_req[1] != MPI_REQUEST_NULL) {
MPI_Wait (&recv_req[1], &status);
MPI_Get_count (&status, MPI_DOUBLE, &msgcnt[1]);
recv_req[1] = MPI_REQUEST_NULL;
} else {
msgcnt[1] = msgcntsU[look_id][1];
#if (DEBUGlevel>=2)
printf("\t[%d] k=%d, look_id=%d, recv_req[1] == MPI_REQUEST_NULL, msgcnt[1] = %d\n",
iam, k, look_id, msgcnt[1]);
#endif
}
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
#endif
#if ( DEBUGlevel>=2 )
printf("[%d] Recv L(:,%4d): #lsub %4d, #lusup %4d from Pc %2d\n",
iam, k, msgcnt[0], msgcnt[1], kcol);
fflush (stdout);
#endif
#if ( PRNTlevel==3 )
++total_msg;
if (!msgcnt[0]) ++zero_msg;
#endif
} else {
msgcnt[0] = 0;
}
lsub = Lsub_buf_2[look_id];
lusup = Lval_buf_2[look_id];
} /* if mycol = Pc(k) */
/* stat->time1 += SuperLU_timer_()-tt1; */
scp = &grid->cscp; /* The scope of process column. */
/* tt1 = SuperLU_timer_(); */
if (myrow == krow) {
lk = LBi (k, grid);
usub = Ufstnz_br_ptr[lk];
uval = Unzval_br_ptr[lk];
if (factoredU[k0] == -1) {
/* Parallel triangular solve across process row *krow* --
U(k,j) = L(k,k) \ A(k,j). */
double ttt2 = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp parallel
#endif
{
PDGSTRS2 (k0, k, Glu_persist, grid, Llu, stat);
}
pdgstrs2_timer += SuperLU_timer_() - ttt2;
/* Multicasts U(k,:) along process columns. */
if (usub) {
msgcnt[2] = usub[2];
msgcnt[3] = usub[1];
} else {
msgcnt[2] = msgcnt[3] = 0;
}
if (ToSendD[lk] == YES) {
for (pi = 0; pi < Pr; ++pi) {
if (pi != myrow) {
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Send (usub, msgcnt[2], mpi_int_t, pi,
SLU_MPI_TAG (2, k0), /* (4*k0+2)%tag_ub */
scp->comm);
MPI_Send (uval, msgcnt[3], MPI_DOUBLE, pi,
SLU_MPI_TAG (3, k0), /* (4*k0+3)%tag_ub */
scp->comm);
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
msg_cnt += 2;
msg_vol += msgcnt[2] * iword + msgcnt[3] * dword;
#endif
#if ( DEBUGlevel>=2 )
printf ("[%d] Send U(%4d,:) down to Pr %2d\n", iam, k, pi);
#endif
} /* if pi ... */
} /* for pi ... */
} /* if ToSendD ... */
} else {
/* =========================================== *
* waiting for U(k,:) for outer-product update *
* =========================================== */
if (ToSendD[lk] == YES) {
for (pi = 0; pi < Pr; ++pi) {
if (pi != myrow) {
MPI_Wait (&send_reqs_u[look_id][pi], &status);
MPI_Wait (&send_reqs_u[look_id][pi + Pr], &status);
}
}
}
msgcnt[2] = msgcntsU[look_id][2];
msgcnt[3] = msgcntsU[look_id][3];
}
/* stat->time2 += SuperLU_timer_()-tt1; */
} else { /* myrow != krow */
/* ========================================= *
* wait for U(k,:) for outer-product updates *
* ========================================= */
if (ToRecv[k] == 2) { /* Recv block row U(k,:). */
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Wait (&recv_reqs_u[look_id][0], &status);
MPI_Get_count (&status, mpi_int_t, &msgcnt[2]);
MPI_Wait (&recv_reqs_u[look_id][1], &status);
MPI_Get_count (&status, MPI_DOUBLE, &msgcnt[3]);
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
#endif
usub = Usub_buf;
uval = Uval_buf;
#if ( DEBUGlevel>=2 )
printf ("[%d] Recv U(%4d,:) from Pr %2d\n", iam, k, krow);
#endif
#if ( PRNTlevel==3 )
++total_msg;
if (!msgcnt[2]) ++zero_msg;
#endif
} else {
msgcnt[2] = 0;
}
/* stat->time6 += SuperLU_timer_()-tt1; */
} /* if myrow == Pr(k) */
/*
* Parallel rank-k update; pair up blocks L(i,k) and U(k,j).
* for (j = k+1; k < N; ++k) {
* for (i = k+1; i < N; ++i)
* if ( myrow == PROW( i, grid ) && mycol == PCOL( j, grid )
* && L(i,k) != 0 && U(k,j) != 0 )
* A(i,j) = A(i,j) - L(i,k) * U(k,j);
*/
msg0 = msgcnt[0];
msg2 = msgcnt[2];
/* tt1 = SuperLU_timer_(); */
if (msg0 && msg2) { /* L(:,k) and U(k,:) are not empty. */
nsupr = lsub[1]; /* LDA of lusup. */
if (myrow == krow) { /* Skip diagonal block L(k,k). */
lptr0 = BC_HEADER + LB_DESCRIPTOR + lsub[BC_HEADER + 1];
luptr0 = knsupc;
nlb = lsub[0] - 1;
} else {
lptr0 = BC_HEADER;
luptr0 = 0;
nlb = lsub[0];
}
iukp = BR_HEADER; /* Skip header; Pointer to index[] of U(k,:) */
rukp = 0; /* Pointer to nzval[] of U(k,:) */
nub = usub[0]; /* Number of blocks in the block row U(k,:) */
klst = FstBlockC (k + 1);
/* -------------------------------------------------------------
Update the look-ahead block columns A(:,k+1:k+num_look_ahead)
------------------------------------------------------------- */
iukp0 = iukp;
rukp0 = rukp;
/* reorder the remaining columns in bottom-up order */
/* TAU_STATIC_TIMER_START("LOOK_AHEAD_UPDATE"); */
for (jj = 0; jj < nub; jj++) {
#ifdef ISORT
iperm_u[jj] = iperm_c_supno[usub[iukp]]; /* Global block number of block U(k,j). */
perm_u[jj] = jj;
#else
perm_u[2 * jj] = iperm_c_supno[usub[iukp]]; /* Global block number of block U(k,j). */
perm_u[2 * jj + 1] = jj;
#endif
jb = usub[iukp]; /* Global block number of block U(k,j). */
nsupc = SuperSize (jb);
iukp += UB_DESCRIPTOR; /* Start fstnz of block U(k,j). */
iukp += nsupc;
}
iukp = iukp0;
#ifdef ISORT
isort (nub, iperm_u, perm_u);
#else
qsort (perm_u, (size_t) nub, 2 * sizeof (int_t),
&superlu_sort_perm);
#endif
j = jj0 = 0;
/************************************************************************/
double ttx =SuperLU_timer_();
#include "dlook_ahead_update.c"
lookaheadupdatetimer += SuperLU_timer_() - ttx;
/************************************************************************/
/*ifdef OMP_LOOK_AHEAD */
/* TAU_STATIC_TIMER_STOP("LOOK_AHEAD_UPDATE"); */
} /* if L(:,k) and U(k,:) not empty */
/* stat->time3 += SuperLU_timer_()-tt1; */
/* ================== */
/* == post receive == */
/* ================== */
kk1 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1);
for (kk0 = k0 + 1; kk0 <= kk1; kk0++) {
kk = perm_c_supno[kk0];
kcol = PCOL (kk, grid);
if (look_ahead[kk] == k0) {
if (mycol != kcol) {
if (ToRecv[kk] >= 1) {
scp = &grid->rscp; /* The scope of process row. */
look_id = kk0 % (1 + num_look_aheads);
recv_req = recv_reqs[look_id];
MPI_Irecv (Lsub_buf_2[look_id], Llu->bufmax[0],
mpi_int_t, kcol, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */
scp->comm, &recv_req[0]);
MPI_Irecv (Lval_buf_2[look_id], Llu->bufmax[1],
MPI_DOUBLE, kcol,
SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */
scp->comm, &recv_req[1]);
}
} else {
lk = LBj (kk, grid); /* Local block number. */
lsub1 = Lrowind_bc_ptr[lk];
lusup1 = Lnzval_bc_ptr[lk];
if (factored[kk] == -1) {
/* Factor diagonal and subdiagonal blocks and
test for exact singularity. */
factored[kk] = 0; /* flag column kk as factored */
double ttt1 = SuperLU_timer_();
PDGSTRF2 (options, kk0, kk, thresh,
Glu_persist, grid, Llu, U_diag_blk_send_req,
tag_ub, stat, info);
pdgstrf2_timer += SuperLU_timer_() - ttt1;
/* Process column *kcol+1* multicasts numeric
values of L(:,k+1) to process rows. */
look_id = kk0 % (1 + num_look_aheads);
send_req = send_reqs[look_id];
msgcnt = msgcnts[look_id];
if (lsub1) {
msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0] * LB_DESCRIPTOR;
msgcnt[1] = lsub1[1] * SuperSize (kk);
} else {
msgcnt[0] = 0;
msgcnt[1] = 0;
}
scp = &grid->rscp; /* The scope of process row. */
for (pj = 0; pj < Pc; ++pj) {
if (ToSendR[lk][pj] != EMPTY) {
MPI_Isend (lsub1, msgcnt[0], mpi_int_t, pj,
SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */
scp->comm, &send_req[pj]);
MPI_Isend (lusup1, msgcnt[1], MPI_DOUBLE, pj,
SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */
scp->comm, &send_req[pj + Pc]);
}
}
} /* for pj ... */
}
}
}
double tsch = SuperLU_timer_();
/************************************************************************/
#ifdef GPU_ACC
#include "dSchCompUdt-cuda.c"
#else
/*#include "SchCompUdt--Phi-2Ddynamic-alt.c"*/
#include "dSchCompUdt-2Ddynamic.c"
#endif
/*uncomment following to compare against SuperLU 3.3 baseline*/
/* #include "SchCompUdt--baseline.c" */
/************************************************************************/
NetSchurUpTimer += SuperLU_timer_()-tsch;
} /* for k0 = 0, ... */
/* ##################################################################
** END MAIN LOOP: for k0 = ...
################################################################## */
pdgstrfTimer= SuperLU_timer_()-pdgstrfTimer;
/* updating total flops */
#if ( PRNTlevel>=1 )
if (!iam) {
printf("Time in scattering %lf \n",scatter_timer );
printf("Time in dgemm %lf \n", gemm_timer );
printf("Total time spent in schur update is \t\t: %5.2lf seconds,\n",NetSchurUpTimer );
printf("Total Time in Factorization \t\t: %5.2lf seconds, \n", pdgstrfTimer);
printf("Time (other GEMM and Scatter) \t\t: %5.2lf seconds, \n", pdgstrfTimer-schur_flop_timer);
printf("Total time spent in schur update when offload \t\t: %5.2lf seconds,\n",CPUOffloadTimer );
}
#endif
#if ( DEBUGlevel>=2 )
for (i = 0; i < Pr * Pc; ++i) {
if (iam == i) {
dPrintLblocks(iam, nsupers, grid, Glu_persist, Llu);
dPrintUblocks(iam, nsupers, grid, Glu_persist, Llu);
printf ("(%d)\n", iam);
PrintInt10 ("Recv", nsupers, Llu->ToRecv);
}
MPI_Barrier (grid->comm);
}
#endif
// printf("Debug : MPI buffers 1\n");
/********************************************************
* Free memory *
********************************************************/
if (Pr * Pc > 1) {
SUPERLU_FREE (Lsub_buf_2[0]); /* also free Lsub_buf_2[1] */
SUPERLU_FREE (Lval_buf_2[0]); /* also free Lval_buf_2[1] */
if (Llu->bufmax[2] != 0)
SUPERLU_FREE (Usub_buf_2[0]);
if (Llu->bufmax[3] != 0)
SUPERLU_FREE (Uval_buf_2[0]);
if (U_diag_blk_send_req[myrow] != MPI_REQUEST_NULL) {
/* wait for last Isend requests to complete, deallocate objects */
for (krow = 0; krow < Pr; ++krow) {
if (krow != myrow)
MPI_Wait (U_diag_blk_send_req + krow, &status);
}
}
SUPERLU_FREE (U_diag_blk_send_req);
}
log_memory( -((Llu->bufmax[0] + Llu->bufmax[2]) * (num_look_aheads + 1) * iword +
(Llu->bufmax[1] + Llu->bufmax[3]) * (num_look_aheads + 1) * dword),
stat );
SUPERLU_FREE (Lsub_buf_2);
SUPERLU_FREE (Lval_buf_2);
SUPERLU_FREE (Usub_buf_2);
SUPERLU_FREE (Uval_buf_2);
SUPERLU_FREE (perm_c_supno);
SUPERLU_FREE (perm_u);
#ifdef ISORT
SUPERLU_FREE (iperm_u);
#endif
SUPERLU_FREE (look_ahead);
SUPERLU_FREE (factoredU);
SUPERLU_FREE (factored);
log_memory(-(6 * nsupers * iword), stat);
for (i = 0; i <= num_look_aheads; i++) {
SUPERLU_FREE (msgcnts[i]);
SUPERLU_FREE (msgcntsU[i]);
}
SUPERLU_FREE (msgcnts);
SUPERLU_FREE (msgcntsU);
for (i = 0; i <= num_look_aheads; i++) {
SUPERLU_FREE (send_reqs_u[i]);
SUPERLU_FREE (recv_reqs_u[i]);
SUPERLU_FREE (send_reqs[i]);
SUPERLU_FREE (recv_reqs[i]);
}
SUPERLU_FREE (recv_reqs_u);
SUPERLU_FREE (send_reqs_u);
SUPERLU_FREE (recv_reqs);
SUPERLU_FREE (send_reqs);
// printf("Debug : MPI buffers 3\n");
#ifdef GPU_ACC
checkCuda (cudaFreeHost (bigV));
checkCuda (cudaFreeHost (bigU));
cudaFree( (void*)dA ); /* Sherry added */
cudaFree( (void*)dB );
cudaFree( (void*)dC );
SUPERLU_FREE( handle );
SUPERLU_FREE( streams );
#else
SUPERLU_FREE (bigV);
SUPERLU_FREE (bigU);
#endif
log_memory(-(bigv_size + bigu_size) * dword, stat);
// printf("Debug : MPI buffers 5\n");
SUPERLU_FREE (Llu->ujrow);
SUPERLU_FREE (tempv2d);
SUPERLU_FREE (indirect);
SUPERLU_FREE (indirect2); /* Sherry added */
SUPERLU_FREE (iuip);
SUPERLU_FREE (ruip);
ldt = sp_ienv_dist(3);
log_memory( -(3 * ldt *ldt * dword + 2 * ldt * num_threads * iword
+ 2 * k * iword), stat );
/* Sherry added */
SUPERLU_FREE(omp_loop_time);
SUPERLU_FREE(full_u_cols);
SUPERLU_FREE(blk_ldu);
log_memory(-2 * ncb * dword, stat);
SUPERLU_FREE(stream_end_col);
SUPERLU_FREE(lookAheadFullRow);
SUPERLU_FREE(lookAheadStRow);
SUPERLU_FREE(lookAhead_lptr);
SUPERLU_FREE(lookAhead_ib);
SUPERLU_FREE(RemainFullRow);
SUPERLU_FREE(RemainStRow);
SUPERLU_FREE(Remain_lptr);
SUPERLU_FREE(Remain_ib);
SUPERLU_FREE(Remain_info);
SUPERLU_FREE(lookAhead_L_buff);
SUPERLU_FREE(Remain_L_buff);
log_memory( -(4 * mrb * iword + mrb * sizeof(Remain_info_t) +
ldt * ldt * (num_look_aheads + 1) * dword +
Llu->bufmax[1] * dword), stat );
SUPERLU_FREE(Ublock_info);
SUPERLU_FREE(Ublock_info_iukp);
SUPERLU_FREE(Ublock_info_rukp);
SUPERLU_FREE(Ublock_info_jb);
#if ( PROFlevel>=1 )
TIC (t1);
#endif
/* Prepare error message - find the smallest index i such that U(i,i)==0 */
if ( *info == 0 ) *info = n + 1;
MPI_Allreduce (info, &iinfo, 1, MPI_INT, MPI_MIN, grid->comm);
if ( iinfo == n + 1 ) *info = 0;
else *info = iinfo;
// printf("test out\n");
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
{
float msg_vol_max, msg_vol_sum, msg_cnt_max, msg_cnt_sum;
MPI_Reduce (&msg_cnt, &msg_cnt_sum,
1, MPI_FLOAT, MPI_SUM, 0, grid->comm);
MPI_Reduce (&msg_cnt, &msg_cnt_max,
1, MPI_FLOAT, MPI_MAX, 0, grid->comm);
MPI_Reduce (&msg_vol, &msg_vol_sum,
1, MPI_FLOAT, MPI_SUM, 0, grid->comm);
MPI_Reduce (&msg_vol, &msg_vol_max,
1, MPI_FLOAT, MPI_MAX, 0, grid->comm);
if (!iam) {
printf ("\tPDGSTRF comm stat:"
"\tAvg\tMax\t\tAvg\tMax\n"
"\t\t\tCount:\t%.0f\t%.0f\tVol(MB)\t%.2f\t%.2f\n",
msg_cnt_sum / Pr / Pc, msg_cnt_max,
msg_vol_sum / Pr / Pc * 1e-6, msg_vol_max * 1e-6);
}
}
#endif
#if ( PRNTlevel==3 )
MPI_Allreduce (&zero_msg, &iinfo, 1, MPI_INT, MPI_SUM, grid->comm);
if (!iam)
printf (".. # msg of zero size\t%d\n", iinfo);
MPI_Allreduce (&total_msg, &iinfo, 1, MPI_INT, MPI_SUM, grid->comm);
if (!iam)
printf (".. # total msg\t%d\n", iinfo);
#endif
#if ( DEBUGlevel>=2 )
for (i = 0; i < Pr * Pc; ++i) {
if (iam == i) {
dPrintLblocks (iam, nsupers, grid, Glu_persist, Llu);
dPrintUblocks (iam, nsupers, grid, Glu_persist, Llu);
printf ("(%d)\n", iam);
PrintInt10 ("Recv", nsupers, Llu->ToRecv);
}
MPI_Barrier (grid->comm);
}
#endif
#if ( DEBUGlevel>=3 )
printf ("(%d) num_copy=%d, num_update=%d\n", iam, num_copy, num_update);
#endif
#if ( DEBUGlevel>=1 )
CHECK_MALLOC (iam, "Exit pdgstrf()");
#endif
return 0;
} /* PDGSTRF */
|
InitialConditions.h | //
// Cubism3D
// Copyright (c) 2018 CSE-Lab, ETH Zurich, Switzerland.
// Distributed under the terms of the MIT license.
//
// Created by Guido Novati (novatig@ethz.ch).
//
#ifndef CubismUP_3D_InitialConditions_h
#define CubismUP_3D_InitialConditions_h
#include "Operator.h"
CubismUP_3D_NAMESPACE_BEGIN
class InitialConditions : public Operator
{
public:
InitialConditions(SimulationData & s) : Operator(s) { }
template<typename K>
inline void run(const K kernel) {
#pragma omp parallel for schedule(static)
for (size_t i=0; i<vInfo.size(); i++)
kernel(vInfo[i], *(FluidBlock*)vInfo[i].ptrBlock);
}
void operator()(const double dt);
std::string getName() { return "IC"; }
};
CubismUP_3D_NAMESPACE_END
#endif
|
tree_reduce.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// RUN: %libomp-compile -DNOWAIT && %libomp-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc
#include "callback.h"
#include <omp.h>
#ifdef NOWAIT
#define FOR_CLAUSE nowait
#else
#define FOR_CLAUSE
#endif
int main() {
  // Running totals. Three reduction variables are needed so that, on 32-bit
  // architectures, the runtime selects the tree-reduction code path whose
  // OMPT callbacks this test verifies (see the file-level RUN lines).
  int sum = 0, a = 0, b = 0;
  int i;
#pragma omp parallel num_threads(5)
// for 32-bit architecture we need at least 3 variables to trigger tree
#pragma omp for reduction(+ : sum, a, b) FOR_CLAUSE
  for (i = 0; i < 10000; i++) {
    a = b = sum += i;
  }

  printf("%i\n", sum);

  // The FileCheck patterns below are part of the test: they require that a
  // matching ompt_event_reduction_begin/end pair is reported within the same
  // parallel region for the threads that take part in the tree reduction.
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID:[0-9]+]]

  // order and distribution to threads not determined
  // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}
  // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}
  // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}
  // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}
  // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}
  // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}
  // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}
  // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}

  return 0;
}
|
pr49897-2.c | /* PR middle-end/49897 */
/* { dg-do run } */
extern void abort (void);
/* Regression test for PR middle-end/49897: reduction(+:sum) combined with
   firstprivate/lastprivate on the same variables across NESTED parallel-for
   regions must produce correct copy-in/copy-out semantics.  */
int
main ()
{
  int i, j, x = 0, y, sum = 0;
  /* Outer loop: x is copied into each thread (firstprivate) and the value
     written by the sequentially-last iteration is copied out (lastprivate),
     so x must end up as 9; likewise y via the inner region.  */
#pragma omp parallel for reduction(+:sum) firstprivate(x) lastprivate(x, y)
  for (i = 0; i < 10; i++)
    {
      x = i;
      y = 0;
#pragma omp parallel for reduction(+:sum) firstprivate(y) lastprivate(y)
      for (j = 0; j < 10; j++)
	{
	  y = j;
	  sum += y;  /* each outer iteration adds 0+1+...+9 = 45 */
	}
    }
  /* 10 outer iterations * 45 = 450.  */
  if (x != 9 || y != 9 || sum != 450)
    abort ();
  return 0;
}
|
levelset_fluid_solver.h | /*
==============================================================================
KratosPFEMApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: antonia $
// Date: $Date: 2009-01-14 16:24:38 $
// Revision: $Revision: 1.11 $
//
//
#if !defined(KRATOS_LEVELSET_FLUID_SOLVER_H_INCLUDED)
#define KRATOS_LEVELSET_FLUID_SOLVER_H_INCLUDED
#define SPLIT_OSS
// #define SYMM_PRESS
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// #include <omp.h>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
//#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "incompressible_fluid_application.h"
namespace Kratos
{
template<unsigned int TDim, class MatrixContainer, class TSparseSpace, class TLinearSolver>
class LevelSetFluidSolver
{
public:
//name for the self defined structure
typedef EdgesStructureType<TDim> CSR_Tuple;
typedef std::vector<CSR_Tuple> EdgesVectorType;
//name for row start and column index vectors
typedef std::vector<unsigned int> IndicesVectorType;
//defining matrix type for test calculations
typedef std::vector< array_1d<double, TDim> > CalcVectorType;
//defining type for local storage of nodal values
typedef std::vector<double> ValuesVectorType;
//defining types for matrix operations
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
typedef typename TSparseSpace::VectorType TSystemVectorType;
//constructor and destructor
// Constructor: stores references to the edge-based matrix container and the
// model part, and records the solver options (shock capturing and smoothing
// of the convective velocity) via the member-initializer list.
LevelSetFluidSolver(MatrixContainer& mr_matrix_container,
                    ModelPart& mr_model_part,
                    bool include_shock_capturing,
                    bool smooth_convective_velocity
                   )
    : mr_matrix_container(mr_matrix_container),
      mr_model_part(mr_model_part)
{
    minclude_shock_capturing   = include_shock_capturing;
    msmooth_convective_velocity = smooth_convective_velocity;
}

~LevelSetFluidSolver() {}
//***********************************
// Initialize the fluid solver:
//  - allocate all nodal/edge work vectors,
//  - pull the initial state (velocity, pressure, density, flags) from Kratos,
//  - classify boundary nodes by their IS_BOUNDARY flag,
//  - build the CSR structure of the pressure Laplacian mL,
//  - compute area normals and edge lengths,
//  - precompute the initial nodal momentum U = rho * u.
// Must be called once before ComputeTimeStep / SolveStep*.
void Initialize(
)
{
    KRATOS_TRY

    //get number of nodes
    unsigned int n_nodes = mr_model_part.Nodes().size();
    unsigned int n_edges = mr_matrix_container.GetNumberEdges();

    //size data vectors (one entry per node unless noted otherwise)
    mWork.resize(n_nodes);
    mvel_n.resize(n_nodes);
    mvel_n1.resize(n_nodes);
    mInitMom.resize(n_nodes);
    mCurrMom.resize(n_nodes);
    mPn.resize(n_nodes);
    mPn1.resize(n_nodes);
    mViscosity.resize(n_nodes);
    mRho.resize(n_nodes);
    mRhoOld.resize(n_nodes);
    mC2inv.resize(n_nodes);
    mA.resize(n_nodes);
    mHmin.resize(n_nodes);
    mHavg.resize(n_nodes);
    mNodalFlag.resize(n_nodes);
    mdistances.resize(n_nodes);
    mEps.resize(n_nodes);
    mEpsOld.resize(n_nodes);
    mD.resize(n_nodes);
    mTauPressure.resize(n_nodes);
    mTauConvection.resize(n_nodes);
    mPi.resize(n_nodes);
    mXi.resize(n_nodes);
    mBodyForce.resize(n_nodes);
    mDrag.resize(n_nodes);
    mx.resize(n_nodes);
    mCp.resize(n_nodes);
    mMach.resize(n_nodes);
    //per-edge data; mBeta starts at 1.0 (no shock-capturing correction)
    mEdgeDimensions.resize(n_edges);
    mBeta.resize(n_edges);
    for (unsigned int csr_index = 0; csr_index < n_edges; csr_index++)
        mBeta[csr_index] = 1.0;

    ValuesVectorType external_pressure;
    external_pressure.resize(n_nodes);

    //read velocity and pressure data from Kratos
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(EXTERNAL_PRESSURE, external_pressure, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(IS_BOUNDARY, mNodalFlag, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(DENSITY, mRho, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, mr_model_part.Nodes());
    mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, mr_model_part.Nodes());
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
    //mr_matrix_container.FillCoordinatesFromDatabase(mx, mr_model_part.Nodes());

    //set flag for first time step
    mFirstStep = true;

    //loop to categorize boundary nodes; the numeric codes come from the
    //IS_BOUNDARY nodal flag stored in the model part
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        //differentiate between types of boundary condition
        switch (static_cast<unsigned int>(mNodalFlag[i_node]))
        {
        case 1:
            //velocity inlet: remember inlet velocity/density for later reuse
            mVelocityInletList.push_back(i_node);
            mVelocityInlet.push_back(mvel_n[i_node]);
            mDensityInlet.push_back(mRho[i_node]);
            mDissipationList.push_back(i_node);
            break;
        case 2:
            //no-slip condition
            mNoSlipBoundaryList.push_back(i_node);
            break;
        case 3:
            //slip condition
            mSlipBoundaryList.push_back(i_node);
            break;
        case 4:
            //mixed condition (slip and pressure node)
            mPressureOutletList.push_back(i_node);
            mPressureOutlet.push_back(external_pressure[i_node]);
            mSlipBoundaryList.push_back(i_node);
            mDissipationList.push_back(i_node);
            break;
        case 5:
            //pressure outlet
            mPressureOutletList.push_back(i_node);
            mPressureOutlet.push_back(external_pressure[i_node]);
            mDissipationList.push_back(i_node);
            break;
        }
    }

    //print number of nodes corresponding to the different types of boundary conditions
    KRATOS_WATCH(mVelocityInletList.size())
    KRATOS_WATCH(mDensityInlet.size())
    KRATOS_WATCH(mPressureOutletList.size())
    KRATOS_WATCH(mSlipBoundaryList.size())
    KRATOS_WATCH(mNoSlipBoundaryList.size())
    KRATOS_WATCH(mDissipationList.size())

    //determine number of edges and entries: every edge contributes two
    //off-diagonal entries, every node one diagonal entry
    unsigned int n_nonzero_entries = 2 * n_edges + n_nodes;
    //allocate memory for variables
    mL.resize(n_nodes,n_nodes,n_nonzero_entries);

    //build the CSR structure of mL row by row
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        //flag for considering diagonal matrix elements (set once the diagonal
        //entry of this row has been inserted)
        bool flag = 0;

        //loop over all neighbours (column indices are stored in ascending order)
        for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
        {
            //get global index of neighbouring node j
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];

            //define matrix structure row by row (the order does matter!)
            if ((j_neighbour > i_node) && (flag == 0))
            {
                //add diagonal/nodal contribution just before the first
                //neighbour with a larger index, to keep columns sorted
                mL.push_back(i_node, i_node, 0.0);
                flag = 1;
            }
            //add non-diagonal/edge contribution
            mL.push_back(i_node, j_neighbour, 0.0);
        }
        //if diagonal element is the last non-zero element of the row
        if (flag == 0)
            mL.push_back(i_node, i_node, 0.0);
    }

    //compute area normals
    CalculateNormals(mr_model_part.Conditions());
    // WriteVectorToDatabase(NORMAL, mPressureNormal, mr_model_part.Nodes());
    mr_matrix_container.WriteVectorToDatabase(NORMAL, mSlipNormal, mr_model_part.Nodes());

    //compute minimum length of the surrounding edges
    CalculateEdgeLengths(mr_model_part.Nodes());

    //prepare initial momentum for first time step: U_i = rho_i * u_i
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        double& rho_i = mRho[i_node];
        array_1d<double, TDim>& u_i = mvel_n1[i_node];
        array_1d<double, TDim>& U_i = mInitMom[i_node];

        //compute initial momentum for iteration of step 1
        for (unsigned int component = 0; component < TDim; component++)
            U_i[component] = rho_i * u_i[component];
    }

    KRATOS_CATCH("")
}
//***************************************
//function to set adequate time step size
void ComputeTimeStep(double CFLNumber)
{
KRATOS_TRY
//local variable for time step size
double delta_t = 1e10;
//getting value of current velocity and of viscosity
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(VISCOSITY, mViscosity, mr_model_part.Nodes());
mr_matrix_container.FillCoordinatesFromDatabase(mx, mr_model_part.Nodes());
//*******************
//loop over all nodes
double n_nodes = mvel_n1.size();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& v_i = mvel_n1[i_node];
// KRATOS_WATCH(v_i);
array_1d<double, TDim>& x_i = mx[i_node];
// KRATOS_WATCH(x_i);
//use CFL condition to compute time step size
double delta_t_i = CFLNumber * 1.0 / (norm_2(v_i)/mHmin[i_node] + 2.0 * mViscosity[i_node]/(mHmin[i_node]*mHmin[i_node]) );
//considering the most restrictive case of neighbor's velocities with similar direction but opposite sense.
//loop over all neighbours
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim>& v_j = mvel_n1[j_neighbour];
array_1d<double, TDim>& x_j = mx[j_neighbour];
array_1d<double, TDim> edge_dir = ZeroVector(TDim);
// KRATOS_WATCH(x_j);
// KRATOS_WATCH(v_j);
//Calculate edge direction
edge_dir[0] = x_j[0]-x_i[0];
edge_dir[1] = x_j[1]-x_i[1];
edge_dir[2] = x_j[2]-x_i[2];
// KRATOS_WATCH(edge_dir);
double aux = norm_2(edge_dir);
// KRATOS_WATCH(aux);
if (aux == 0.0)
{
edge_dir = ZeroVector(TDim);
}
else
{
//normalized edge direction
edge_dir /= norm_2(edge_dir);
// KRATOS_WATCH(edge_dir);
}
//int aux = inner_prod(v_i,v_j);
double v_i_par = inner_prod(v_i, edge_dir);
double v_j_par = inner_prod(v_j, edge_dir);
// KRATOS_WATCH(v_i_par);
// KRATOS_WATCH(v_j_par);
if ((v_i_par >= 0.0 && v_j_par <= 0.0) || (v_i_par <= 0.0 && v_j_par >= 0.0))
{
double delta_t_j = CFLNumber / ((fabs(v_i_par) + fabs(v_j_par))/mHmin[i_node] + 2.0 * mViscosity[i_node]/(mHmin[i_node]*mHmin[i_node]));
// KRATOS_WATCH(delta_t_j);
// KRATOS_WATCH(delta_t_i);
if (delta_t_j < delta_t_i)
delta_t_i = delta_t_j;
}
}
//choose the overall minimum of delta_t_i
if (delta_t_i < delta_t)
delta_t = delta_t_i;
}
//*******************
//perform MPI syncronization of the dt (minimum should be kept)
//write time step size to Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
CurrentProcessInfo[DELTA_TIME] = delta_t;
KRATOS_CATCH("")
}
//**********************************************************************************
//function to solve fluid equations - fractional step 1: compute fractional momentum
//Fractional step 1: advances the fractional momentum over one time step with a
//4-stage explicit Runge-Kutta scheme (weights dt/6, dt/3, dt/3, dt/6) on the
//edge-based data structure. Reads velocity/pressure/density/body force/
//viscosity/distance/porosity from the Kratos database, builds nodal momenta
//U = rho*u, computes the stabilization times, runs the four RK stages and
//leaves the end-of-step fractional momentum in mCurrMom (with velocity BCs
//applied). Returns a TDim-sized "stop criteria" vector that is currently
//always zero, so no outer iteration is driven by it.
Vector SolveStep1()
{
KRATOS_TRY
//PREREQUISITES
//variables for node based data handling
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//storage of nodal values in local variables
CalcVectorType rhs;
rhs.resize(n_nodes);
//read velocity and pressure data from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes);
mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, rNodes);
mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, rNodes);
mr_matrix_container.FillScalarFromDatabase(DENSITY, mRho, rNodes);
mr_matrix_container.FillOldScalarFromDatabase(DENSITY, mRhoOld, rNodes);
mr_matrix_container.FillVectorFromDatabase(BODY_FORCE, mBodyForce, rNodes);
mr_matrix_container.FillScalarFromDatabase(VISCOSITY, mViscosity, rNodes);
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, rNodes);
mr_matrix_container.FillOldScalarFromDatabase(POROSITY, mEpsOld, rNodes);
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
//build nodal momenta (U = rho*u, current and old step) and scale the nodal
//body force by the density, in a single parallel sweep
#pragma omp parallel for
for ( int i_node = 0; i_node < n_nodes; i_node++)
{
// -> mCurrMom
//compute the momentum at the current step -> mCurrMom
double& rho_i = mRho[i_node];
const array_1d<double, TDim>& u_i = mvel_n1[i_node];
array_1d<double, TDim>& U_i = mCurrMom[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
U_i[comp] = rho_i * u_i[comp];
// -> mInitMom
double& rho_i_old = mRhoOld[i_node];
//compute the momentum at the beginning of the step
const array_1d<double, TDim>& u_i_old = mvel_n[i_node];
array_1d<double, TDim>& U_i_old = mInitMom[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
U_i_old[comp] = rho_i_old * u_i_old[comp];
//compute volumetric body force
array_1d<double, TDim>& f_i = mBodyForce[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
f_i[comp] *= rho_i;
}
//rescale momenta and velocities by the nodal porosity
//(assumes DivideByPorosity performs dest = src / eps nodewise - TODO confirm
//against its definition, which is outside this chunk)
DivideByPorosity(mCurrMom, mCurrMom, mEps);
DivideByPorosity(mInitMom, mInitMom, mEpsOld);
DivideByPorosity(mvel_n, mvel_n, mEpsOld);
DivideByPorosity(mvel_n1, mvel_n1, mEps);
//compute advective velocity - area average of the current velocity
CalculateAdvectiveVelocity(mvel_n1, mA, msmooth_convective_velocity);
//compute intrinsic time
double time_inv = 1.0/delta_t;
// time_inv = 0.0;
#pragma omp parallel for firstprivate(time_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
// double& h_i = mHavg[i_node];
double& h_i = mHmin[i_node];
array_1d<double, TDim>& a_i = mA[i_node];
const double nu_i = mViscosity[i_node];
// mTau[i_node] = 1.0 / (0.5 * norm_2(a_i)/h_i + time_inv);
double vel_norm = norm_2(a_i);
// mTauPressure[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv + nu_i /(h_i*h_i) );
mTauPressure[i_node] = delta_t;
mTauConvection[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv + nu_i /(h_i*h_i) );
//NOTE(review): mTauPressure is hard-set to delta_t above, so both clamp
//branches below can never fire - confirm the override is intended
if (mTauPressure[i_node] < delta_t)
mTauPressure[i_node] = delta_t;
else if(mTauPressure[i_node] > 100.0*delta_t)
mTauPressure[i_node] = 100.0*delta_t;
}
//compute pressure switch
if (mFirstStep == false)
if(minclude_shock_capturing == true)
ComputeMonotonicityPreserving();
//mWork accumulates the RK4-weighted sum starting from the old-step momentum;
//mCurrMom holds the intermediate stage value
mr_matrix_container.AssignVectorToVector(mInitMom,mWork); //mWork = mvel_n NO!!!-> mWork = mU_iold
//first step of Runge Kutta
mr_matrix_container.AssignVectorToVector(mvel_n,mvel_n1); //mvel_n1 = mvel_n
mr_matrix_container.AssignVectorToVector(mInitMom,mCurrMom);
// double start_prod = omp_get_wtime();
CalculateAdvectiveVelocity(mvel_n1, mA, msmooth_convective_velocity);
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mCurrMom, mPn1, mA, mBodyForce, mViscosity, rhs);
/*double norma=0.0;
for (int i_node = 0; i_node < n_nodes; i_node++)
for (int kkk = 0; kkk < TDim; kkk++)
norma += rhs[i_node][kkk]*rhs[i_node][kkk];
KRATOS_WATCH(norma);*/
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/6.0 , mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mCurrMom,mInitMom, 0.5*delta_t , mr_matrix_container.GetInvertedMass(), rhs);
ApplyVelocityBC(mCurrMom);
/*mr_matrix_container.WriteVectorToDatabase(CONV_PROJ, mA, rNodes);
mr_matrix_container.WriteScalarToDatabase(TEMPERATURE, mTauConvection, rNodes);*/
//second step
CalculateVelocity(mvel_n1,mCurrMom,mRho);
CalculateAdvectiveVelocity( mvel_n1, mA, msmooth_convective_velocity);
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mCurrMom, mPn1, mA, mBodyForce,mViscosity, rhs );
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/3.0 , mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mCurrMom,mInitMom, 0.5*delta_t , mr_matrix_container.GetInvertedMass(),rhs);
ApplyVelocityBC(mCurrMom);
//third step
CalculateVelocity(mvel_n1,mCurrMom,mRho);
CalculateAdvectiveVelocity( mvel_n1, mA, msmooth_convective_velocity);
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mCurrMom, mPn1, mA, mBodyForce,mViscosity, rhs);
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/3.0 , mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mCurrMom,mInitMom, delta_t , mr_matrix_container.GetInvertedMass(), rhs);
ApplyVelocityBC(mCurrMom);
//fourth step
CalculateVelocity(mvel_n1,mCurrMom,mRho);
CalculateAdvectiveVelocity( mvel_n1, mA, msmooth_convective_velocity);
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mCurrMom, mPn1, mA, mBodyForce,mViscosity, rhs );
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/6.0 , mr_matrix_container.GetInvertedMass(), rhs);
ApplyVelocityBC(mCurrMom);
//compute right-hand side
mr_matrix_container.AssignVectorToVector(mWork,mCurrMom);
ApplyVelocityBC(mCurrMom);
// //compute ratio for iteration
Vector stop_criteria(TDim);
noalias(stop_criteria) = ZeroVector(TDim);
// stop_criteria[0] = 0.0;
// stop_criteria[1] = 0.0;
return stop_criteria;
KRATOS_CATCH("")
}
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
//Assembles the right-hand side of the fractional momentum equation:
//1) convective projection pi = Minv * C(a)U (OSS stabilization term),
//2) per-node assembly of body force, porous (Ergun-type) resistance,
//   edge convective / pressure-gradient / viscous contributions and
//   low/high-order stabilization - only for nodes with negative DISTANCE
//   (inside the fluid),
//3) pressure-outlet boundary integral completing the pressure gradient.
//Parameters: momentum U, pressure p, convective_velocity a, body_force
//(already scaled by density by the caller), nodal viscosity; the result is
//accumulated into rhs, which the caller zeroes beforehand.
void CalculateRHS(
const CalcVectorType& momentum,
const ValuesVectorType& pressure,
const CalcVectorType& convective_velocity,
const CalcVectorType& body_force,
const ValuesVectorType& viscosity,
CalcVectorType& rhs)
{
KRATOS_TRY
int n_nodes = momentum.size();
//calculating the convective projection
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& pi_i = mPi[i_node]; //******************
//setting to zero
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pi_i[l_comp] = 0.0;
const array_1d<double, TDim>& a_i = convective_velocity[i_node];
const array_1d<double, TDim>& U_i = momentum[i_node];
//const double& p_i = pressure[i_node];
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour];
const array_1d<double, TDim>& U_j = momentum[j_neighbour];
//const double& p_j = pressure[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_ConvectiveContribution(pi_i,a_i,U_i,a_j,U_j);
// edge_ij.Add_grad_p(pi_i,p_i,p_j);
// edge_ij.Sub_grad_p(pi_i,p_i,p_j);
}
const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pi_i[l_comp] *= m_inv;
}
//perform MPI syncronization
//calculating the RHS
array_1d<double,TDim> stab_low;
array_1d<double,TDim> stab_high;
#pragma omp parallel for private(stab_low,stab_high)
for ( int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist < 0.0) //node is inside domain ---- if outside do nothing
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& f_i = body_force[i_node];
const array_1d<double, TDim>& a_i = convective_velocity[i_node];
const array_1d<double, TDim>& U_i = momentum[i_node];
const array_1d<double, TDim>& pi_i = mPi[i_node];
const double& p_i = pressure[i_node];
const double& nu_i = viscosity[i_node];
//double& h_i = mHmin[i_node];
//initializing with the external forces (e.g. gravity)
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = m_i * f_i[comp];
//porous contribution
double eps = mEps[i_node];
double d = mD[i_node]; //diameter of the particle
//inverse permeability of the Ergun/Kozeny-Carman type
double kinv = 150.0*(1.0-eps)*(1.0-eps)/(eps*eps*eps*d*d);
double norm_u_2 = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
norm_u_2 += a_i[comp]*a_i[comp]; //BUGFIX: accumulate with "+=" ("=" kept only the last component's square)
// norm_u_2 = U_i[comp]*U_i[comp];
//CORRECTED Term
double nonlin_term = kinv * nu_i * eps + 1.75 * sqrt(norm_u_2 * kinv / (eps * 150.0));
//ERROR IN WRITING THE NON LINEAR TERM//
// double nonlin_term = kinv * nu_i * eps + 1.75 * norm_u_2 * sqrt(kinv / ( eps * 150.0));
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] -= m_i * nonlin_term * U_i[comp];
//convective term
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour];
const array_1d<double, TDim>& U_j = momentum[j_neighbour];
const array_1d<double, TDim>& pi_j = mPi[j_neighbour];
const double& p_j = pressure[j_neighbour];
const double& nu_j = viscosity[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_ConvectiveContribution(rhs_i,a_i,U_i,a_j,U_j);
//take care! we miss including a B.C. for the external pressure
edge_ij.Add_Gp(rhs_i,p_i,p_j);
edge_ij.Sub_ViscousContribution(rhs_i,U_i,nu_i,U_j,nu_j);
//add stabilization
// edge_ij.CalculateConvectionStabilization_LOW( stab_low,a_i,U_i,p_i,a_j,U_j,p_j);
edge_ij.CalculateConvectionStabilization_LOW( stab_low,a_i,U_i,a_j,U_j);
double edge_tau = mTauConvection[i_node];
edge_ij.CalculateConvectionStabilization_HIGH( stab_high,a_i,pi_i,a_j,pi_j);
double beta = mBeta[csr_index];
edge_ij.Sub_StabContribution( rhs_i, edge_tau, beta, stab_low, stab_high);
}
}
}
//boundary integrals --> finishing the calculation of the pressure gradient
int loop_size1 = mPressureOutletList.size();
#pragma omp parallel for
for (int i_pressure = 0; i_pressure < loop_size1; i_pressure++)
{
unsigned int i_node = mPressureOutletList[i_pressure];
array_1d<double, TDim>& rhs_i = rhs[i_node];
const double& p_ext_i = mPressureOutlet[i_pressure];
const array_1d<double, TDim>& an_i = mPressureNormal[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] -= an_i[comp] * p_ext_i;
// const array_1d<double, TDim>& U_i = momentum[i_node];
// const array_1d<double, TDim>& a_i = convective_velocity[i_node];
// double temp = 0.0;
// double scalar_prod = 0.0;
// for (unsigned int comp = 0; comp < TDim; comp++)
// {
// scalar_prod += an_i[comp] * U_i[comp];
// temp += an_i[comp] * an_i[comp];
// }
// temp = sqrt(temp);
// for (unsigned int comp = 0; comp < TDim; comp++)
// // rhs_i[comp] -= U_i[comp] * temp;
// // rhs_i[comp] -= an_i[comp] * scalar_prod / temp;
// rhs_i[comp] -= a_i[comp] * scalar_prod / temp;
}
KRATOS_CATCH("")
}
//*************************************************************************
//function to solve fluid equations - fractional step 2: calculate pressure
//Fractional step 2: assembles and solves the pressure(-correction) equation
//L * dp = rhs, where L is the edge Laplacian scaled by (delta_t + tau),
//and the rhs collects the pressure stabilization, the divergence of the
//fractional momentum (times porosity), the high-order OSS term and the
//density time-variation. Dirichlet conditions (pressure outlets and nodes
//with positive DISTANCE, i.e. outside the fluid) are imposed by diagonal
//penalization. On exit mPn1 is updated and written back to the database.
//NOTE: the SPLIT_OSS / SYMM_PRESS preprocessor switches select the
//stabilization variant at compile time.
void SolveStep2(typename TLinearSolver::Pointer pLinearSolver)
{
KRATOS_TRY
//PREREQUISITES
//allocate memory for variables
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//unknown and right-hand side vector
TSystemVectorType dp, rhs;
dp.resize(n_nodes);
rhs.resize(n_nodes);
//NOTE(review): dU_i, dU_j and work_array are never used below - candidates for removal
array_1d<double, TDim> dU_i, dU_j, work_array;
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
#ifdef _OPENMP
double time_inv = 0.0; //1.0/delta_t;
#endif
#ifdef SPLIT_OSS
//pressure-gradient projection xi = Minv * G p (only for the split-OSS variant)
// #pragma omp parallel for firstprivate(time_inv), private(work_array)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& xi_i = mXi[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
xi_i[comp] = 0.0;
const double& p_i = mPn1[i_node];
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& p_j = mPn1[j_neighbour];
//projection of pressure gradients
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_grad_p(xi_i,p_i,p_j);
// // // edge_ij.Sub_grad_p(xi_i,p_i,p_j);
}
const double& m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
xi_i[l_comp] *= m_inv;
}
#endif
//loop over all nodes
#pragma omp parallel for firstprivate(time_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
rhs_i = 0.0;
double& p_i = mPn1[i_node];
double& eps_i = mEps[i_node];
array_1d<double, TDim>& U_i_curr = mCurrMom[i_node];
//array_1d<double, TDim>& a_i = mA[i_node];
double& rho_i = mRho[i_node];
#ifdef SPLIT_OSS
array_1d<double, TDim>& xi_i = mXi[i_node];
#else
array_1d<double, TDim>& pi_i = mPi[i_node];
#endif
//const double& h_i = mHavg[i_node];
double l_ii = 0.0;
//loop over all neighbours
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
double& p_j = mPn1[j_neighbour];
double& eps_j = mEps[j_neighbour];
array_1d<double, TDim>& U_j_curr = mCurrMom[j_neighbour];
//array_1d<double, TDim>& a_j = mA[j_neighbour];
#ifdef SPLIT_OSS
array_1d<double, TDim>& xi_j = mXi[j_neighbour];
#else
array_1d<double, TDim>& pi_j = mPi[j_neighbour];
#endif
//const double& h_j = mHavg[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
#ifdef SYMM_PRESS
double edge_tau = 0.5*( mTauPressure[i_node] + mTauPressure[j_neighbour]);
#else
double edge_tau = mTauPressure[i_node];
#endif
// double edge_tau = CalculateEdgeTau(time_inv,h_i,a_i,h_j,a_j);
//
//compute laplacian operator
double sum_l_ikjk;
edge_ij.CalculateScalarLaplacian(sum_l_ikjk);
double sum_l_ikjk_onlystab = sum_l_ikjk * (edge_tau);
sum_l_ikjk *= (delta_t + edge_tau);
//assemble right-hand side
//pressure contribution
rhs_i -= sum_l_ikjk_onlystab * (p_j - p_i);
//other part of the residual
#if !defined(SPLIT_OSS)
//NOTE(review): this branch references a_i, whose declaration above is
//commented out - it cannot compile unless SPLIT_OSS is defined; confirm
//which configuration is actually built
array_1d<double, TDim>& a_j = mA[j_neighbour];
boost::numeric::ublas::bounded_matrix<double,TDim,TDim>& L = edge_ij.LaplacianIJ;
for(unsigned int i = 0; i<TDim; i++)
for(unsigned int j = 0; j<TDim; j++)
rhs_i -= edge_tau * a_i[j] * L(i,j) * (U_j_curr[j] - U_i_curr[j]);
#endif
//calculating the divergence of the fract vel
edge_ij.Sub_D_v(rhs_i,U_i_curr * eps_i,U_j_curr * eps_j);
// edge_ij.Sub_D_v(rhs_i,a_i*rho_i,a_j*rho_i);
//high order stabilizing term
double temp = 0.0;
#ifdef SPLIT_OSS
// edge_ij.Add_div_v(temp,mTauPressure[i_node]*xi_i,mTauPressure[j_neighbour]*xi_j);
edge_ij.Add_div_v(temp,xi_i,xi_j);
#else
edge_ij.Add_div_v(temp,pi_i,pi_j);
#endif
temp *= mBeta[csr_index];
rhs_i += edge_tau * temp;
// rhs_i += temp;
//assemble laplacian matrix
mL(i_node, j_neighbour) = sum_l_ikjk;
l_ii -= sum_l_ikjk;
}
mL(i_node, i_node) = l_ii;
//add density variation contribution
const double& rho_i_old = mRhoOld[i_node];
const double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
rhs_i -= m_i * (rho_i - rho_i_old)/delta_t;
//add mass contribution for compressible flows
/* double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
mL(i_node, i_node) += mC2inv[i_node] * m_i / delta_t;*/
}
//find the max diagonal term
double max_diag = 0.0;
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double L_diag = mL(i_node, i_node);
if(fabs(L_diag) > fabs(max_diag)) max_diag = L_diag;
}
//respect pressure boundary conditions by penalization
//(note: if every diagonal is zero, huge is zero and the penalization has no effect)
double huge = max_diag * 1e30;
for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
{
unsigned int i_node = mPressureOutletList[i_pressure];
mL(i_node, i_node) = huge;
rhs[i_node] = 0.0;
}
//modification for level_set
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
//selecting nodes for fixing pressure
// std::vector< unsigned int > aux(mdistances.size());
// for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++)
// aux[i_dist] = 0;
// for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++)
// {
// if(mdistances[i_dist] > 0)
// {
// aux[i_dist] = 1;
// /* for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_dist]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_dist+1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// aux[j_neighbour] = 1;
//
// }*/
// }
//
// }
//fix the pressure increment to zero on nodes outside the fluid (positive distance)
for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++)
{
// if(aux[i_dist] != 0)
if(mdistances[i_dist] > 0)
{
// mPn1[i_dist] = 0.0;
mL(i_dist, i_dist) = huge;
rhs[i_dist] = 0.0;
}
}
//set starting vector for iterative solvers
for (int i_node = 0; i_node < n_nodes; i_node++)
dp[i_node] = 0.0;
//solve linear equation system L dp = rhs
pLinearSolver->Solve(mL,dp,rhs);
KRATOS_WATCH(*pLinearSolver)
//update pressure
for (int i_node = 0; i_node < n_nodes; i_node++)
mPn1[i_node] += dp[i_node];
//re-impose the prescribed outlet pressures exactly
for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
{
unsigned int i_node = mPressureOutletList[i_pressure];
mPn1[i_node] = mPressureOutlet[i_pressure];
}
//calculate density variation from pressure variation
// for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
// mRho[i_node] = mRhoOld[i_node] + dp[i_node] * mC2inv[i_node];
// for (unsigned int i_density = 0; i_density < mDensityInlet.size(); i_density++)
// {
// unsigned int i_node = mVelocityInletList[i_density];
// mRho[i_node] = mDensityInlet[i_density];
// }
//write pressure and density to Kratos
mr_matrix_container.WriteScalarToDatabase(PRESSURE, mPn1, rNodes);
// mr_matrix_container.WriteScalarToDatabase(DENSITY, mRho, rNodes);
KRATOS_CATCH("")
}
//**********************************************************************************
//function to solve fluid equations - fractional step 3: correct fractional momentum
//Fractional step 3: corrects the fractional momentum with the gradient of the
//pressure increment, U += dt * Minv * G(p_n+1 - p_n), on nodes inside the
//fluid (negative DISTANCE). Then reapplies the velocity BCs, recovers the
//velocity from the momentum, rescales it by the porosity and writes VELOCITY
//and the seepage drag back to the Kratos database.
void SolveStep3()
{
KRATOS_TRY
//get number of nodes
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//DELETE ME: is this really needed??! It is not being filled with anything... and it is fresh at every time step...
mr_matrix_container.FillVectorFromDatabase(SEEPAGE_DRAG, mDrag, rNodes);
//CORRECT FRACTIONAL MOMENTUM
//define work array
array_1d<double, TDim> correction;
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
//compute end of step momentum
#pragma omp parallel for private(correction) firstprivate(delta_t)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist < 0.0) //node is inside domain ---- if outside do nothing
{
array_1d<double, TDim>& U_i_curr = mCurrMom[i_node];
double delta_p_i = mPn1[i_node] - mPn[i_node];
const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
//setting to zero
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
correction[l_comp] = 0.0;
//compute edge contributions dt*M^(-1)Gp
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
double delta_p_j = mPn1[j_neighbour] - mPn[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_Gp(correction,delta_p_i,delta_p_j);
// edge_ij.Sub_Gp(correction,delta_p_i,delta_p_j);
}
//compute prefactor
double coefficient = delta_t * m_inv;
//correct fractional momentum
for (unsigned int comp = 0; comp < TDim; comp++)
U_i_curr[comp] += coefficient * correction[comp];
}
}
ApplyVelocityBC(mCurrMom);
//recover velocity from the corrected momentum and rescale by porosity
CalculateVelocity(mvel_n1,mCurrMom,mRho);
MultiplyByPorosity(mvel_n1, mvel_n1, mEps);
//write velocity of time step n+1 to Kratos
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes);
CalculateDrag(mA, mCurrMom, mDrag, mViscosity);
//CALCULATE THE DRAG MATRIX TO PASS TO THE SOLID PART
mr_matrix_container.WriteVectorToDatabase(SEEPAGE_DRAG, mDrag, rNodes);
KRATOS_CATCH("")
}
//************************************
//function to calculate speed of sound
//Computes the inverse squared speed of sound nodewise from the equation of
//state of an ideal gas: c^2 = gamma * p / rho  =>  1/c^2 = rho / (gamma * p).
//The rNodes argument is accepted for interface consistency but not used here.
void SolveStep4(ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
const int num_nodes = mC2inv.size();
#pragma omp parallel for
for (int idx = 0; idx < num_nodes; idx++)
{
const double pressure = mPn1[idx];
mC2inv[idx] = mRho[idx] / (mGamma * pressure);
}
KRATOS_CATCH("")
}
//************************************
//Imposes the velocity boundary conditions directly on the momentum array:
//1) inlet nodes get the prescribed momentum U = rho * u,
//2) slip nodes have the normal component of the momentum removed,
//3) no-slip nodes are set to zero momentum.
void ApplyVelocityBC(CalcVectorType& MomentumArray)
{
KRATOS_TRY
//inlet: prescribe U = rho * u on every inlet node
const int n_inlet = mVelocityInletList.size();
#pragma omp parallel for schedule(static)
for (int k = 0; k < n_inlet; k++)
{
const unsigned int node_idx = mVelocityInletList[k];
const array_1d<double, TDim>& u = mVelocityInlet[k];
const double rho = mDensityInlet[k];
array_1d<double, TDim>& U = MomentumArray[node_idx];
for (unsigned int d = 0; d < TDim; d++)
U[d] = rho * u[d];
}
//slip: subtract the projection of U onto the (non-normalized) area normal
const int n_slip = mSlipBoundaryList.size();
#pragma omp parallel for
for (int k = 0; k < n_slip; k++)
{
const unsigned int node_idx = mSlipBoundaryList[k];
array_1d<double, TDim>& U = MomentumArray[node_idx];
const array_1d<double, TDim>& normal = mSlipNormal[node_idx];
double U_dot_n = 0.0;
double n_dot_n = 0.0;
for (unsigned int d = 0; d < TDim; d++)
{
U_dot_n += U[d] * normal[d];
n_dot_n += normal[d] * normal[d];
}
const double scale = U_dot_n / n_dot_n;
//tangential momentum = original momentum minus its normal part
for (unsigned int d = 0; d < TDim; d++)
U[d] -= scale * normal[d];
}
//no-slip: zero the momentum entirely
const int n_noslip = mNoSlipBoundaryList.size();
#pragma omp parallel for
for (int k = 0; k < n_noslip; k++)
{
array_1d<double, TDim>& U = MomentumArray[mNoSlipBoundaryList[k]];
noalias(U) = ZeroVector(TDim);
}
KRATOS_CATCH("")
}
//********************************
//function to compute coefficients
//Extrapolates the nodal VELOCITY from the fluid domain (DISTANCE <= 0) into
//the first 'extrapolation_layers' layers of nodes outside it. Layer 0 is the
//set of interior nodes touching the free surface; each subsequent layer is
//built from the unvisited outside-neighbours of the previous one. The
//velocity of each outer-layer node is then set to the average velocity of
//its already-visited neighbours of lower layer index. The IS_VISITED nodal
//value is used as scratch storage for the layer index (stored as a double).
void ExtrapolateVelocities(unsigned int extrapolation_layers)
{
KRATOS_TRY
typedef Node<3> PointType;
typedef PointerVector<PointType > PointVector;
typedef PointVector::iterator PointIterator;
//reset is visited flag
for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
inode->GetValue(IS_VISITED) = 0;
}
//generate a container with the layers to be extrapolated
std::vector< PointVector > layers(extrapolation_layers);
//detect the nodes inside the fluid surface
for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
if( inode->FastGetSolutionStepValue(DISTANCE) <= 0.0) //candidates are only the ones inside the fluid domain
{
WeakPointerVector< Node<3> >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES);
for( WeakPointerVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
if(i->FastGetSolutionStepValue(DISTANCE) > 0) //add the node as free surface if one of its neighb is outside
{
//guard against adding the same node once per outside-neighbour
if( inode->GetValue(IS_VISITED) == 0)
{
layers[0].push_back( *(inode.base() ) );
inode->GetValue(IS_VISITED) = 1;
}
}
}
}
}
// //reset is visited flag
// for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
// inode != mr_model_part.NodesEnd();
// inode++)
// {
// inode->GetValue(IS_VISITED) = 0;
// }
//fill the following layers by neighbour relationships
//each layer fills the following
for(unsigned int il = 0; il<extrapolation_layers-1; il++)
{
for( PointIterator iii=(layers[il]).begin(); iii!=(layers[il]).end(); iii++)
{
WeakPointerVector< Node<3> >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for(WeakPointerVector< Node<3> >::iterator jjj=neighb_nodes.begin(); jjj !=neighb_nodes.end(); jjj++) //destination = origin1 + value * Minv*origin
{
if( jjj->FastGetSolutionStepValue(DISTANCE) > 0 &&
jjj->GetValue(IS_VISITED) == 0.0 )
{
layers[il+1].push_back( Node<3>::Pointer( *(jjj.base() ) ) );
//layer index + 1 stored as double: layer il+1 gets flag il+2
jjj->GetValue(IS_VISITED) = double(il+2.0);
}
}
}
}
//perform extrapolation layer by layer by making an average
//of the neighbours of lower order
array_1d<double,3> aux;
for(unsigned int il = 1; il<extrapolation_layers; il++)
{
for( PointIterator iii=layers[il].begin(); iii!=layers[il].end(); iii++)
{
// noalias(aux) = ZeroVector(3);
// double dist_min = 10000000000.0;
//
// array_1d<double,3>& coords_I = iii->Coordinates();
//
// WeakPointerVector< Node<3> >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
// for(WeakPointerVector< Node<3> >::iterator j=neighb_nodes.begin(); j !=neighb_nodes.end(); j++)
// {
// if(j->GetValue(IS_VISITED) < il+1) //if it is on the next layer
// {
// array_1d<double,3>& coords_J = j->Coordinates();
//
// double dist = 0.0;
// for (unsigned int comp = 0; comp < TDim; comp++)
// dist += pow(coords_I[comp]-coords_J[comp],2);
//
// if(dist < dist_min)
// {
// dist_min = dist;
// noalias( iii->FastGetSolutionStepValue(VELOCITY) ) = j->FastGetSolutionStepValue(VELOCITY);
// }
//
// }
// }
//extrapolate the average velocity
noalias(aux) = ZeroVector(3);
double avg_number = 0.0;
WeakPointerVector< Node<3> >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for(WeakPointerVector< Node<3> >::iterator i=neighb_nodes.begin(); i !=neighb_nodes.end(); i++)
{
//neighbour counts if it belongs to a lower layer and was visited at all
if(i->GetValue(IS_VISITED) < il+1 && i->GetValue(IS_VISITED))
{
noalias(aux) += i->FastGetSolutionStepValue(VELOCITY);
avg_number += 1.0;
}
}
if(avg_number != 0.0)
aux /= avg_number;
//if no valid neighbour was found, the velocity is set to zero (aux stays zero)
noalias( iii->FastGetSolutionStepValue(VELOCITY) ) = aux;
// noalias( iii->FastGetSolutionStepValue(VELOCITY,1) ) = aux;
}
}
// mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
// mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
//
// unsigned int n_nodes = mPn1.size();
//
// //pressure coefficient
// // #pragma omp parallel for
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
// const double dist_i = mdistances[i_node];
//
//
// if( dist_i > 0.0)
// {
// double nn = 0.0;
//
// array_1d<double, TDim>& vel_i = mvel_n1[i_node];
//
// for (unsigned int comp = 0; comp < TDim; comp++)
// vel_i[comp] = 0.0;
//
// //compute edge contributions dt*M^(-1)Gp
// for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//
// const double dist_j = mdistances[j_neighbour];
//
// if(dist_j <= 0.0)
// {
// const array_1d<double, TDim>& vel_j = mvel_n1[j_neighbour];
//
// for (unsigned int comp = 0; comp < TDim; comp++)
// vel_i[comp] += vel_j[comp];
//
// nn += 1.0;
//
// }
// }
//
// if(nn> 1e-6) //it should be either 0 1 .. N
// {
// // std::cout << "inode= " << i_node << "nn = " << nn << std::endl;
//
// double inv_nn = 1.0/nn;
// for (unsigned int comp = 0; comp < TDim; comp++)
// vel_i[comp] *= inv_nn;
// KRATOS_WATCH(vel_i);
// }
//
// }
// }
//
//
//
// // ApplyVelocityBC(mCurrMom);
//
//
// //write velocity of time step n+1 to Kratos
// mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
KRATOS_CATCH("")
}
void ChangeSignToDistance()
{
KRATOS_TRY
for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
double dist = inode->FastGetSolutionStepValue(DISTANCE);
inode->FastGetSolutionStepValue(DISTANCE) = -dist;
}
KRATOS_CATCH("")
}
void MarkNodesByDistance(double min, double max )
{
KRATOS_TRY
for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
double dist = inode->FastGetSolutionStepValue(DISTANCE);
if(dist > min && dist < max)
inode->GetValue(IS_VISITED) = 1;
else
inode->GetValue(IS_VISITED) = 0;
}
KRATOS_CATCH("")
}
//Copies the current-step value of the scalar variable rVar into the
//previous-step (history index 1) buffer on every node.
void SaveScalarVariableToOldStep(Variable<double>& rVar)
{
KRATOS_TRY
for (ModelPart::NodesContainerType::iterator it = mr_model_part.NodesBegin();
it != mr_model_part.NodesEnd(); ++it)
it->FastGetSolutionStepValue(rVar, 1) = it->FastGetSolutionStepValue(rVar);
KRATOS_CATCH("")
}
void MarkExternalAndMixedNodes( )
{
KRATOS_TRY
for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
inode->GetValue(IS_VISITED) = 0;
}
//detect the nodes inside the fluid surface
for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
if( inode->FastGetSolutionStepValue(DISTANCE) > 0.0) //candidates are only the ones inside the fluid domain
{
inode->GetValue(IS_VISITED) = 1;
WeakPointerVector< Node<3> >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES);
for( WeakPointerVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
i->GetValue(IS_VISITED) = 1;
}
}
}
KRATOS_CATCH("")
}
void MarkInternalAndMixedNodes( )
{
KRATOS_TRY
for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
inode->GetValue(IS_VISITED) = 0;
}
//detect the nodes inside the fluid surface
for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
if( inode->FastGetSolutionStepValue(DISTANCE) <= 0.0) //candidates are only the ones inside the fluid domain
{
inode->GetValue(IS_VISITED) = 1;
WeakPointerVector< Node<3> >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES);
for( WeakPointerVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
i->GetValue(IS_VISITED) = 1;
}
}
}
KRATOS_CATCH("")
}
void CalculateVariablesDistribution(double rho_dense, double rho_light, double nu_dense, double nu_light, double eps, const array_1d<double,3>& body_force)
{
KRATOS_TRY
for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
double dist = inode->FastGetSolutionStepValue(DISTANCE);
//calculated smoothed density and viscosity distribution
double H;
if(dist < -eps) H = 0.0;
else if(dist > eps) H = 1.0;
else H = (dist+eps)/(2.0*eps) + sin(3.141592*dist/eps)/(2.0*3.141592);
double rho_node = rho_dense + (rho_light-rho_dense)*H;
inode->FastGetSolutionStepValue(DENSITY) = rho_node;
double nu_node = nu_dense + (nu_light-nu_dense)*H;
inode->FastGetSolutionStepValue(VISCOSITY) = nu_node;
//reset variables outside of the fluid domain
if( dist < 0 )
noalias(inode->FastGetSolutionStepValue(BODY_FORCE)) = body_force;
else
{
inode->FastGetSolutionStepValue(PRESSURE) = 0.0;
noalias(inode->FastGetSolutionStepValue(BODY_FORCE)) = body_force;
noalias(inode->FastGetSolutionStepValue(VELOCITY)) = ZeroVector(3);
noalias(inode->FastGetSolutionStepValue(VELOCITY,1)) = ZeroVector(3);
}
}
KRATOS_CATCH("")
}
//********************************
//function to compute coefficients
//Computes and writes to the database two derived nodal fields:
//- pressure coefficient Cp = (p - p_inf) / q_inf
//- Mach number M = |v| / c, using mC2inv = 1/c^2
void CalculateCoefficients(ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
//use a signed count: avoids the signed/unsigned comparison against the
//int OpenMP loop index and matches the convention of the sibling methods
const int n_nodes = static_cast<int>(mPn1.size());
//pressure coefficient
#pragma omp parallel for
for ( int i_node = 0; i_node < n_nodes; i_node++)
mCp[i_node] = (mPn1[i_node] - mPinf) / mQinf;
mr_matrix_container.WriteScalarToDatabase(PRESSURE_COEFFICIENT, mCp, rNodes);
//Mach number
#pragma omp parallel for
for ( int i_node = 0; i_node < n_nodes; i_node++)
mMach[i_node] = norm_2(mvel_n1[i_node]) * sqrt(mC2inv[i_node]);
mr_matrix_container.WriteScalarToDatabase(MACH_NUMBER, mMach, rNodes);
KRATOS_CATCH("")
}
//**************************************
//function to calculate the area normals
void CalculateNormals(ModelPart::ConditionsContainerType& rConditions)
{
    KRATOS_TRY
    // Compute the area normal of every boundary face, store it in the
    // condition's NORMAL value, then distribute the face normals to the
    // nodes as either slip normals or pressure normals according to the
    // nodal boundary flags (as used below: 1 = velocity inlet, 3 = slip,
    // 4 = mixed, 5 = pressure).
    array_1d<double,3> area_normal;
    //2D case: normal of an edge
    if(TDim == 2)
    {
        for(ModelPart::ConditionsContainerType::iterator cond_it=rConditions.begin(); cond_it!=rConditions.end(); cond_it++)
            CalculateNormal2D(cond_it,area_normal);
    }
    //3D case: normal of a triangle via cross product of two edge vectors
    else if(TDim == 3)
    {
        //help vectors for cross product
        array_1d<double,3> v1;
        array_1d<double,3> v2;
        for(ModelPart::ConditionsContainerType::iterator cond_it=rConditions.begin(); cond_it!=rConditions.end(); cond_it++)
            CalculateNormal3D(cond_it,area_normal,v1,v2);
    }
    //(re)initialize nodal normals
    unsigned int n_nodes = mNodalFlag.size();
    mSlipNormal.resize(n_nodes);
    mPressureNormal.resize(n_nodes);
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        noalias(mSlipNormal[i_node]) = ZeroVector(TDim);
        noalias(mPressureNormal[i_node]) = ZeroVector(TDim);
    }
    //loop over all faces and accumulate nodal contributions
    for(ModelPart::ConditionsContainerType::iterator cond_it=rConditions.begin(); cond_it!=rConditions.end(); cond_it++)
    {
        //get geometry data of the face
        Geometry<Node<3> >& face_geometry = cond_it->GetGeometry();
        //classify the face from the flags of its nodes
        bool is_slip_condition = true;
        bool is_pressure_face = true;
        bool is_velocity_inlet = true;
        for (unsigned int if_node = 0; if_node < TDim; if_node++)
        {
            unsigned int i_node = static_cast<unsigned int>(face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX));
            //a slip face must contain only slip (3) or mixed (4) nodes
            if ( static_cast<unsigned int>(mNodalFlag[i_node]) != 3 &&
                 static_cast<unsigned int>(mNodalFlag[i_node]) != 4)
                is_slip_condition = false;
            //a pressure face must contain only pressure (5) or mixed (4) nodes
            if ( static_cast<unsigned int>(mNodalFlag[i_node]) != 5 &&
                 static_cast<unsigned int>(mNodalFlag[i_node]) != 4)
                is_pressure_face = false;
            //a velocity inlet face must contain only inlet (1) nodes
            if (static_cast<unsigned int>(mNodalFlag[i_node]) != 1)
                is_velocity_inlet = false;
        }
        //reference for area normal of the face
        array_1d<double,3>& face_normal = cond_it->GetValue(NORMAL);
        //each of the TDim face nodes receives an equal share of the face normal
        double node_factor = 1.0/TDim;
        //slip face: contributes to the slip normals
        if (is_slip_condition == true)
            for (unsigned int if_node = 0; if_node < TDim; if_node++)
            {
                unsigned int i_node = static_cast<unsigned int>(face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX));
                array_1d<double,TDim>& slip_normal = mSlipNormal[i_node];
                for (unsigned int comp = 0; comp < TDim; comp++)
                    slip_normal[comp] += node_factor * face_normal[comp];
            }
        //pressure (or velocity inlet) face: contributes to the pressure normals
        if (is_pressure_face == true || is_velocity_inlet == true)
            for (unsigned int if_node = 0; if_node < TDim; if_node++)
            {
                unsigned int i_node = static_cast<unsigned int>(face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX));
                array_1d<double,TDim>& pressure_normal = mPressureNormal[i_node];
                for (unsigned int comp = 0; comp < TDim; comp++)
                    pressure_normal[comp] += node_factor * face_normal[comp];
            }
        //mixed face: add pressure share to pressure nodes and slip share to slip nodes
        if(is_pressure_face == false && is_slip_condition == false && is_velocity_inlet == false)
            for (unsigned int if_node = 0; if_node < TDim; if_node++)
            {
                unsigned int i_node = static_cast<unsigned int>(face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX));
                if ( static_cast<unsigned int>(mNodalFlag[i_node]) == 5) //pressure node
                {
                    array_1d<double,TDim>& pressure_normal = mPressureNormal[i_node];
                    for (unsigned int comp = 0; comp < TDim; comp++)
                        pressure_normal[comp] += node_factor * face_normal[comp];
                }
                else if ( static_cast<unsigned int>(mNodalFlag[i_node]) == 3) //slip node
                {
                    // BUGFIX: this reference previously aliased
                    // mPressureNormal[i_node], so the slip contribution of
                    // mixed faces was added to the pressure normals and
                    // never reached mSlipNormal.
                    array_1d<double,TDim>& slip_normal = mSlipNormal[i_node];
                    for (unsigned int comp = 0; comp < TDim; comp++)
                        slip_normal[comp] += node_factor * face_normal[comp];
                }
            }
    }
    KRATOS_CATCH("")
}
void SetSpeedOfSound(double c, ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
unsigned int n_nodes = mC2inv.size();
double temp = 1.0 / (c * c);
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
mC2inv[i_node] = temp;
//WriteScalarToDatabase(LIFT_COEFFICIENT, mC2inv, rNodes);
KRATOS_CATCH("")
}
void SetFreeFlowConditions(array_1d<double, 3> velocity, double pressure, double density, double gamma)
{
    KRATOS_TRY
    // Store the free-stream state and derive the reference quantities
    // used elsewhere (dynamic pressure, free-stream Mach number, nodal
    // inverse squared speed of sound); impose the free-stream velocity
    // on every velocity inlet node.
    mUinf = velocity;
    mPinf = pressure;
    mRhoinf = density;
    mGamma = gamma;
    // free-stream dynamic pressure: q_inf = 0.5 * rho * |u|^2
    mQinf = 0.5 * mRhoinf * norm_2(mUinf) * norm_2(mUinf);
    // free-stream Mach number: M_inf = |u| / sqrt(gamma p / rho)
    mMachinf = norm_2(mUinf) / (sqrt(mGamma*mPinf/mRhoinf));
    // nodal compressibility parameter: 1/c^2 = rho / (gamma p)
    const unsigned int number_of_nodes = mPn1.size();
    for (unsigned int node = 0; node < number_of_nodes; node++)
        mC2inv[node] = mRho[node] / (mGamma * mPn1[node]);
    // set the free-stream velocity on all inlet nodes
    for (unsigned int inlet = 0; inlet < mVelocityInletList.size(); inlet++)
        noalias(mVelocityInlet[inlet]) = velocity;
    KRATOS_CATCH("")
}
//**********************************************************************
void CalculateVelocity( CalcVectorType& velocity,
                        const CalcVectorType& momentum,
                        const ValuesVectorType& rho)
{
    // Compute the nodal velocity from the nodal momentum:
    // vel_i = mom_i / rho_i, componentwise for each node.
    //
    // BUGFIX: the 'rho' parameter was previously dead — the body read the
    // member mRho instead. The function now honors its argument; callers
    // that pass mRho (the apparent intent, given the signature) see
    // identical behavior. TODO(review): confirm all call sites pass mRho.
    int loop_size = velocity.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < loop_size; i_node++)
    {
        const double inv_rho = 1.0/rho[i_node];
        array_1d<double,TDim>& vel = velocity[i_node];
        const array_1d<double,TDim>& mom = momentum[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            vel[comp] = mom[comp] * inv_rho;
    }
}
void SetDissipationLength(double h)
{
    KRATOS_TRY
    // Store the characteristic length used by the dissipation terms.
    mDissipationLength = h;
    KRATOS_CATCH("")
}
void CalculateDrag (CalcVectorType& convective_velocity,
                    CalcVectorType& momentum,
                    CalcVectorType& drag,
                    const ValuesVectorType& viscosity)
{
    // Compute the nodal porous (Darcy-Forchheimer type) drag for all nodes
    // inside the fluid domain (distance < 0); nodes outside are untouched.
    // Side effect: stores a copy of the viscosity field in mViscosity.
    mViscosity = viscosity;
    int n_nodes = mViscosity.size();
    for ( int i_node = 0; i_node < n_nodes; i_node++)
    {
        double dist = mdistances[i_node];
        if (dist < 0.0) //node is inside domain ---- if outside do nothing
        {
            const array_1d<double, TDim>& a_i = convective_velocity[i_node];
            const array_1d<double, TDim>& U_i = momentum[i_node];
            array_1d<double, TDim>& Drag_i = drag[i_node];
            const double& nu_i = viscosity[i_node];
            //porous contribution
            double eps = mEps[i_node]; //porosity
            double d = mD[i_node]; //diameter of the particle
            //inverse permeability (Ergun / Kozeny-Carman form):
            //1/k = 150 (1-eps)^2 / (eps^3 d^2)
            double kinv = 150.0*(1.0-eps)*(1.0-eps)/(eps*eps*eps*d*d);
            // BUGFIX: accumulate the squared norm of the convective
            // velocity; the original used '=' inside the loop and kept
            // only the last component's square.
            double norm_u_2 = 0.0;
            for (unsigned int comp = 0; comp < TDim; comp++)
                norm_u_2 += a_i[comp]*a_i[comp];
            // NOTE(review): the Forchheimer term below multiplies by
            // |u|^2; Ergun's correlation uses |u| = sqrt(norm_u_2) —
            // confirm the intended form with the model author.
            double nonlin_term = kinv * nu_i * eps + 1.75 * norm_u_2 * sqrt(kinv / ( eps * 150.0));
            for (unsigned int comp = 0; comp < TDim; comp++)
                Drag_i[comp] = nonlin_term * U_i[comp];
        }
    }
}
//*******************************
//function to free dynamic memory
void Clear()
{
    KRATOS_TRY
    // Release all dynamically allocated solver storage.
    // velocity / momentum working vectors
    mWork.clear();
    mvel_n.clear();
    mvel_n1.clear();
    mA.clear();
    // pressure fields
    mPn.clear();
    mPn1.clear();
    // mesh metrics
    mHmin.clear();
    mHavg.clear();
    //mAreaNormal.clear();
    //mvel_nitNormal.clear();
    // boundary normals and nodal flags
    mPressureNormal.clear();
    mSlipNormal.clear();
    mNodalFlag.clear();
    // boundary condition lists and prescribed data
    mVelocityInletList.clear();
    mVelocityInlet.clear();
    mPressureOutletList.clear();
    mPressureOutlet.clear();
    mSlipBoundaryList.clear();
    mNoSlipBoundaryList.clear();
    // pressure system and stabilization parameters
    mL.clear();
    mTauPressure.clear();
    mTauConvection.clear();
    // material / porosity fields
    mViscosity.clear();
    mEps.clear();
    mEpsOld.clear();
    KRATOS_CATCH("")
}
//******************************************
void CalculateForces()
{
// Assembles the nodal momentum-equation residual (including the inertia
// term) and writes it into the nodal FORCE variable.
// Reads VELOCITY/PRESSURE/DENSITY (current and old step), BODY_FORCE and
// VISCOSITY from the nodal database and DELTA_TIME from the process info.
// Side effects: overwrites mCurrMom/mInitMom/mBodyForce (the latter is
// scaled in place by density) and refreshes mTauPressure/mTauConvection.
KRATOS_TRY
//variables for node based data handling
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//storage of nodal values in local variables
CalcVectorType rhs;
rhs.resize(n_nodes);
//read velocity, pressure, density, body force and viscosity from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes);
mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, rNodes);
mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, rNodes);
mr_matrix_container.FillScalarFromDatabase(DENSITY, mRho, rNodes);
mr_matrix_container.FillOldScalarFromDatabase(DENSITY, mRhoOld, rNodes);
mr_matrix_container.FillVectorFromDatabase(BODY_FORCE, mBodyForce, rNodes);
mr_matrix_container.FillScalarFromDatabase(VISCOSITY, mViscosity, rNodes);
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
//momentum at current/old step and volumetric body force (per node)
#pragma omp parallel for
for ( int i_node = 0; i_node < n_nodes; i_node++)
{
// -> mCurrMom
//compute the momentum at the current step -> mCurrMom (U = rho * u)
double& rho_i = mRho[i_node];
const array_1d<double, TDim>& u_i = mvel_n1[i_node];
array_1d<double, TDim>& U_i = mCurrMom[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
U_i[comp] = rho_i * u_i[comp];
// -> mInitMom
double& rho_i_old = mRhoOld[i_node];
//compute the momentum at the beginning of the step
const array_1d<double, TDim>& u_i_old = mvel_n[i_node];
array_1d<double, TDim>& U_i_old = mInitMom[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
U_i_old[comp] = rho_i_old * u_i_old[comp];
//compute volumetric body force — NOTE: mBodyForce is scaled in place,
//so it now holds rho * f rather than the raw database value
array_1d<double, TDim>& f_i = mBodyForce[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
f_i[comp] *= rho_i;
}
//compute advective velocity - area average of the current velocity
CalculateAdvectiveVelocity(mvel_n1, mA, msmooth_convective_velocity);
//compute intrinsic time (stabilization parameters)
double time_inv = 1.0/delta_t;
#pragma omp parallel for firstprivate(time_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
// double& h_i = mHavg[i_node];
double& h_i = mHmin[i_node];
array_1d<double, TDim>& a_i = mA[i_node];
const double nu_i = mViscosity[i_node];
double vel_norm = norm_2(a_i);
//tau = 1 / (2|a|/h + 0.01/dt + nu/h^2); both taus use the same formula here
mTauPressure[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv + nu_i /(h_i*h_i) );
mTauConvection[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv + nu_i /(h_i*h_i) );
//clamp the pressure tau to the range [dt, 100*dt]
if (mTauPressure[i_node] < delta_t)
mTauPressure[i_node] = delta_t;
else if(mTauPressure[i_node] > 100.0*delta_t)
mTauPressure[i_node] = 100.0*delta_t;
}
//compute pressure switch (shock capturing); skipped on the very first step
if (mFirstStep == false)
if(minclude_shock_capturing == true)
ComputeMonotonicityPreserving();
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mCurrMom, mPn1, mA, mBodyForce, mViscosity, rhs);
ValuesVectorType& lumped_mass = mr_matrix_container.GetLumpedMass();
//add inertia term: rhs_i -= M_lumped * (U^{n+1} - U^n) / dt
#pragma omp parallel for firstprivate(time_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& curr_mom_i = mCurrMom[i_node];
const array_1d<double, TDim>& old_mom_i = mInitMom[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp]-=time_inv*lumped_mass[i_node]*(curr_mom_i[comp]-old_mom_i[comp]);
//change of sign
/* for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = -rhs_i[comp];*/
}
mr_matrix_container.WriteVectorToDatabase(FORCE, rhs, mr_model_part.Nodes());
KRATOS_CATCH("")
}
private:
//references to the shared edge/CSR data structure and the model part
MatrixContainer& mr_matrix_container;
ModelPart& mr_model_part;
//if true, CalculateAdvectiveVelocity mass-averages the convective velocity
bool msmooth_convective_velocity;
//if true, ComputeMonotonicityPreserving is used (shock capturing)
bool minclude_shock_capturing;
//nodal values
//velocity vector U at time steps n and n+1, plus momentum working vectors
CalcVectorType mWork, mvel_n, mvel_n1, mInitMom, mCurrMom, mFracMom, mx;
//pressure vector p at time steps n and n+1, and nodal viscosity
ValuesVectorType mPn, mPn1, mViscosity;
//monotony preserving term (one entry per CSR edge)
ValuesVectorType mBeta;
//density at time steps n+1 and n
ValuesVectorType mRho, mRhoOld;
//compressibility parameter: nodal 1/c^2 (inverse squared speed of sound)
ValuesVectorType mC2inv;
//free-stream reference state (set by SetFreeFlowConditions)
double mGamma;
double mQinf;
array_1d<double, TDim> mUinf;
double mPinf;
double mRhoinf;
double mMachinf;
//coefficients: pressure coefficient, Mach number, signed distances
ValuesVectorType mCp, mMach, mdistances;
//advective velocity vector
CalcVectorType mA;
//minimum length of the edges surrounding each nodal point
ValuesVectorType mHmin;
//average element size per node
ValuesVectorType mHavg;
//porosity fields and particle diameter (porous drag)
ValuesVectorType mEps;
ValuesVectorType mEpsOld;
ValuesVectorType mD;
//edge vectors x_i - x_j, one per CSR edge
CalcVectorType mEdgeDimensions;
double mDissipationLength;
//area normal
//CalcVectorType mAreaNormal, mvel_nitNormal;
//nodal boundary normals assembled in CalculateNormals
CalcVectorType mPressureNormal, mSlipNormal;
//projection terms
CalcVectorType mPi, mXi;
CalcVectorType mBodyForce, mDrag;
//flag for first time step
bool mFirstStep;
//flag to differentiate interior and boundary nodes
//(values used in this file: 1=velocity inlet, 3=slip, 4=mixed, 5=pressure
// — inferred from CalculateNormals/CalculateAdvectiveVelocity usage)
ValuesVectorType mNodalFlag;
//lists of nodes with different types of boundary conditions
IndicesVectorType mSlipBoundaryList, mNoSlipBoundaryList, mPressureOutletList, mVelocityInletList;
IndicesVectorType mDissipationList;
CalcVectorType mVelocityInlet;
ValuesVectorType mPressureOutlet, mDensityInlet;
//list for pressure boundary faces
ModelPart::ConditionsContainerType mPressureFaces;
//intrinsic time step size (stabilization taus, see CalculateForces)
ValuesVectorType mTauPressure;
ValuesVectorType mTauConvection;
//variables for resolving pressure equation
//laplacian matrix
TSystemMatrixType mL;
//***********************************************************
//functions to calculate area normals for boundary conditions
void CalculateNormal2D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double,3>& area_normal)
{
    // Area normal of a 2D boundary edge: the edge vector rotated by -90
    // degrees, n = (dy, -dx, 0); its length equals the edge length.
    // The result is also stored on the condition's NORMAL value.
    Geometry<Node<3> >& face = cond_it->GetGeometry();
    const double dx = face[1].X() - face[0].X();
    const double dy = face[1].Y() - face[0].Y();
    area_normal[0] = dy;
    area_normal[1] = -dx;
    area_normal[2] = 0.00;
    noalias(cond_it->GetValue(NORMAL)) = area_normal;
}
void CalculateNormal3D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double,3>& area_normal, array_1d<double,3>& v1,array_1d<double,3>& v2 )
{
    // Area normal of a triangular 3D boundary face: -0.5 * (e1 x e2),
    // where e1, e2 are the edge vectors from node 0 (|n| = face area).
    // v1/v2 are caller-provided scratch vectors so the face loop does not
    // re-allocate them; the result is stored on the condition's NORMAL.
    Geometry<Node<3> >& face = cond_it->GetGeometry();
    // edge vectors from node 0 towards nodes 1 and 2
    v1[0] = face[1].X() - face[0].X();
    v1[1] = face[1].Y() - face[0].Y();
    v1[2] = face[1].Z() - face[0].Z();
    v2[0] = face[2].X() - face[0].X();
    v2[1] = face[2].Y() - face[0].Y();
    v2[2] = face[2].Z() - face[0].Z();
    MathUtils<double>::CrossProduct(area_normal, v1, v2);
    // half the cross product gives the triangle area; sign sets orientation
    area_normal *= -0.5;
    noalias(cond_it->GetValue(NORMAL)) = area_normal;
}
//******************************************
//function to calculate advective velocities
void CalculateAdvectiveVelocity(const CalcVectorType& rVelocity, CalcVectorType& rAdvectiveVelocity, bool smooth_convective_velocity)
{
// Computes the advective (convective) velocity field.
// If smoothing is enabled, each nodal value is the consistent-mass-weighted
// average of the neighbouring nodal velocities (CSR edge loop over the
// matrix container); otherwise the velocity is copied unchanged.
KRATOS_TRY
if(smooth_convective_velocity == true)
{
//get number of nodes
int n_nodes = rVelocity.size();
//initialize advective velocities
/* #pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
noalias(rAdvectiveVelocity[i_node]) = ZeroVector(TDim);*/
//loop over all nodes
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
//reference for advective velocity of node i
array_1d<double, TDim>& a_i = rAdvectiveVelocity[i_node];
noalias(a_i) = ZeroVector(TDim);
//setting weighting mass to zero
double mass_sum = 0.0;
//loop over all neighbours (CSR row of node i)
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
//add consistent mass of edge ij to denominator
double& m_ij = mr_matrix_container.GetEdgeValues()[csr_index].Mass;
mass_sum += m_ij;
//reference for velocity of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const array_1d<double, TDim>& u_j = rVelocity[j_neighbour];
//add contributions of numerator componentwisely
for (unsigned int comp = 0; comp < TDim; comp++)
a_i[comp] += m_ij * u_j[comp];
}
//for Dirichlet boundary nodes lumped values have to be included
//attention: nodes with Neumann pressure condition are treated as interior points!
//(flags 0, 4 and 5 are treated as interior-like here — see CalculateNormals
// for the flag meanings)
if ((static_cast<unsigned int>(mNodalFlag[i_node]) != 0) && (static_cast<unsigned int>(mNodalFlag[i_node]) != 5) && (static_cast<unsigned int>(mNodalFlag[i_node]) != 4))
{
//taking into account diagonal matrix elements
//m_ii = lumped mass minus the off-diagonal masses accumulated above
double m_ii = mr_matrix_container.GetLumpedMass()[i_node] - mass_sum;
const array_1d<double, TDim>& u_i = rVelocity[i_node];
//add contribution to advective velocity
for (unsigned int comp = 0; comp < TDim; comp++)
a_i[comp] += m_ii * u_i[comp];
//add contribution to mass sum
mass_sum += m_ii;
}
//weighting contributions by the mass sum of all (surrounding) edges
for (unsigned int comp = 0; comp < TDim; comp++)
a_i[comp] /= mass_sum;
}
}
else
{
//no smoothing: plain componentwise copy of the velocity
//get number of nodes
int n_nodes = rVelocity.size();
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& aaa = rAdvectiveVelocity[i_node];
const array_1d<double, TDim>& u_i = rVelocity[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
aaa[comp] = u_i[comp];
}
// noalias(rAdvectiveVelocity[i_node]) = mvel_n1[i_node];
}
// for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
// noalias(rAdvectiveVelocity[i_node]) = mvel_n1[i_node];
KRATOS_CATCH("")
}
//*********************************************************
//function to calculate minimum length of surrounding edges
void CalculateEdgeLengths(ModelPart::NodesContainerType& rNodes)
{
// Fills the mesh metric fields: mHmin (copied from the matrix container),
// mHavg (element-size estimate from the lumped mass) and mEdgeDimensions
// (edge vectors x_i - x_j for every CSR edge).
KRATOS_TRY
//get number of nodes
unsigned int n_nodes = rNodes.size();
//reserve memory for storage of nodal coordinates
std::vector< array_1d<double, 3> > position;
position.resize(n_nodes);
//get position of all nodes
for (typename ModelPart::NodesContainerType::iterator node_it=rNodes.begin(); node_it!=rNodes.end(); node_it++)
{
//get the global index of the node
unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
//save its coordinates locally
noalias(position[i_node]) = node_it->Coordinates();
//initialize minimum edge length with relatively big values
//(NOTE: this init is redundant — mHmin is overwritten unconditionally
// from the matrix container just below)
mHmin[i_node] = 1e10;
}
//copy the per-node minimum edge length from the matrix container
ValuesVectorType& aaa = mr_matrix_container.GetHmin();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
mHmin[i_node] = aaa[i_node];
}
//take unstructured meshes into account: estimate an average element size
//from the lumped mass (nodal area/volume)
if(TDim == 2)
{
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_i = mHavg[i_node];
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
// double& rho_i = mRho[i_node];
//2D: h ~ sqrt(2 * nodal area)
h_i = sqrt(2.0*m_i);
}
}
else if(TDim == 3)
{
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_i = mHavg[i_node];
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
// double& rho_i = mRho[i_node];
//3D: h ~ cbrt(6 * nodal volume)
h_i = pow (6.0*m_i, 1.0/3.0);
}
}
//compute edge coordinates: l_k = x_i - x_j for every CSR edge (i,j)
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, 3>& pos_i = position[i_node];
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, 3>& pos_j = position[j_neighbour];
array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
for (unsigned int comp = 0; comp < TDim; comp++)
l_k[comp] = pos_i[comp] - pos_j[comp];
}
}
KRATOS_CATCH("")
}
//*******************************************************
//function to calculate monotonicity preserving term beta
void ComputeMonotonicityPreserving()
{
// Computes the edge-based shock-capturing switch mBeta (one value per CSR
// edge, intended range [0,1]) from the pressure jump along each edge and
// the averaged projection term mXi: beta -> 1 where the pressure jump and
// the projected gradient disagree (a discontinuity), beta -> 0 where they
// agree (smooth solution).
KRATOS_TRY
unsigned int n_nodes = mPn1.size();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
double& p_i = mPn1[i_node];
array_1d<double, TDim>& xi_i = mXi[i_node];
//loop over the CSR edges (i,j) of node i
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
double& p_j = mPn1[j_neighbour];
array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
array_1d<double, TDim>& xi_j = mXi[j_neighbour];
//pressure jump p_i - p_j along the edge
double press_diff = p_i - p_j;
//projection of the averaged xi onto the edge vector
double proj_sum = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
proj_sum += l_k[comp] * (xi_i[comp] + xi_j[comp]);
proj_sum *= 0.5;
double temp = fabs(press_diff) + fabs(proj_sum);
//guard against division by ~zero: treat tiny denominators as full switch
if (temp <= 1e-10)
mBeta[csr_index] = 1.0;
else
// mBeta[csr_index] = 1.0 - fabs(fabs(press_diff) - fabs(proj_sum)) / temp;
mBeta[csr_index] = 1.0 - fabs(press_diff + proj_sum) / temp;
/*mBeta[csr_index]=1.0;*/
/* if (mNodalFlag[i_node] == 1.0 || mNodalFlag[i_node] == 4.0 || mNodalFlag[i_node] == 5.0 || mNodalFlag[j_neighbour] == 1.0 || mNodalFlag[j_neighbour] == 4.0 || mNodalFlag[j_neighbour] == 5.0)
mBeta[csr_index] = 0.0;*/
/*if (mBeta[csr_index]<0.0 && mBeta[csr_index]>1.0)
KRATOS_WATCH(mBeta[csr_index]);*/
}
}
KRATOS_CATCH("")
}
inline double CalculateEdgeTau( const double time_inv, const double h_i,
                                const array_1d<double,TDim>& v_i,
                                const double h_j,
                                const array_1d<double,TDim>& v_j)
{
    // Stabilization parameter for the edge (i,j):
    // tau = 1 / (2|v_avg|/h_avg + 1/dt + 1e-6/h_avg^2),
    // with h_avg the mean of the two nodal lengths and v_avg the mean of
    // the two nodal velocities.
    const double h_avg = 0.5 * (h_i+h_j);
    // squared norm of v_i + v_j, then scale by 1/4 to average
    double sum_sq = 0.0;
    for (unsigned int k = 0; k < TDim; k++)
    {
        const double comp_sum = v_i[k] + v_j[k];
        sum_sq += pow(comp_sum, 2);
    }
    const double norm_avg = sqrt(0.25 * sum_sq);
    return 1.0 / (2.0 * norm_avg/h_avg + time_inv + 1e-6 /(h_avg*h_avg) );
}
void DivideByPorosity(CalcVectorType& r_destination,const CalcVectorType& r_origin, const ValuesVectorType& porosity)
{
    // Componentwise scaling: destination_i = origin_i / porosity_i.
    int n_nodes = r_origin.size();
    #pragma omp parallel for
    for (int node = 0; node < n_nodes; node++)
    {
        const double inv_eps = 1.0/porosity[node];
        array_1d<double, TDim>& dst = r_destination[node];
        const array_1d<double, TDim>& src = r_origin[node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            dst[comp] = inv_eps * src[comp];
    }
}
void MultiplyByPorosity(CalcVectorType& r_destination, const CalcVectorType& r_origin, const ValuesVectorType& porosity)
{
    // Componentwise scaling: destination_i = origin_i * porosity_i.
    int n_nodes = r_origin.size();
    #pragma omp parallel for
    for (int node = 0; node < n_nodes; node++)
    {
        const double eps = porosity[node];
        array_1d<double, TDim>& dst = r_destination[node];
        const array_1d<double, TDim>& src = r_origin[node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            dst[comp] = eps * src[comp];
    }
}
};
} //namespace Kratos
#endif //KRATOS_LEVELSET_FLUID_SOLVER_H_INCLUDED defined
|
par_csr_matop_device.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_utilities.h"
#include "_hypre_parcsr_mv.h"
#include "_hypre_utilities.hpp"
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_Int
hypre_ParcsrGetExternalRowsDeviceInit( hypre_ParCSRMatrix *A,
HYPRE_Int indices_len,
HYPRE_BigInt *indices,
hypre_ParCSRCommPkg *comm_pkg,
HYPRE_Int want_data,
void **request_ptr)
{
/* Start fetching the off-processor rows of A (as described by comm_pkg)
 * into a device CSR matrix. This function posts the communication and
 * returns a request object through *request_ptr; the matching
 * hypre_ParcsrGetExternalRowsDeviceWait() completes the exchange and
 * returns the assembled matrix.
 * Request layout: [0] = comm handle for the (big) column indices,
 * [1] = comm handle for the values (NULL if want_data == 0),
 * [2] = the partially-built A_ext (row pointer already final).
 * NOTE(review): 'indices' itself is never dereferenced here; only
 * indices_len is checked against the comm package. */
HYPRE_Int i, j;
HYPRE_Int num_sends, num_rows_send, num_nnz_send, num_recvs, num_rows_recv, num_nnz_recv;
HYPRE_Int *d_send_i, *send_i, *d_send_map, *d_recv_i, *recv_i;
HYPRE_BigInt *d_send_j, *d_recv_j;
HYPRE_Int *send_jstarts, *recv_jstarts;
HYPRE_Complex *d_send_a = NULL, *d_recv_a = NULL;
hypre_ParCSRCommPkg *comm_pkg_j;
hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
/* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/* HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A); */
/* HYPRE_Int first_row = hypre_ParCSRMatrixFirstRowIndex(A); */
HYPRE_Int first_col = hypre_ParCSRMatrixFirstColDiag(A);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_Int num_procs;
HYPRE_Int my_id;
void **vrequest;
hypre_CSRMatrix *A_ext;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
/* number of sends (#procs) */
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
/* number of rows to send */
num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
/* number of recvs (#procs) */
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
/* number of rows to recv */
num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);
/* must be true if indices contains proper offd indices */
hypre_assert(indices_len == num_rows_recv);
/* send_i/recv_i:
 * the arrays to send and recv: we first send and recv the row lengths */
d_send_i = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_DEVICE);
d_send_map = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_DEVICE);
send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST);
recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST);
d_recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE);
/* fill the send array with row lengths (copy the send map to the device,
 * zero the leading entry, then count nnz of diag+offd per mapped row) */
hypre_TMemcpy(d_send_map, hypre_ParCSRCommPkgSendMapElmts(comm_pkg), HYPRE_Int,
num_rows_send, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_Memset(d_send_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE);
hypreDevice_GetRowNnz(num_rows_send, d_send_map, A_diag_i, A_offd_i, d_send_i + 1);
/* send array send_i out: deviceTohost first and MPI (async)
 * note the shift in recv_i by one */
hypre_TMemcpy(send_i, d_send_i + 1, HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST,
HYPRE_MEMORY_DEVICE);
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i + 1);
/* turn the device-side row lengths into a row pointer (prefix sum) */
hypreDevice_IntegerInclusiveScan(num_rows_send + 1, d_send_i);
/* total number of nnz to send */
hypre_TMemcpy(&num_nnz_send, d_send_i + num_rows_send, HYPRE_Int, 1, HYPRE_MEMORY_HOST,
HYPRE_MEMORY_DEVICE);
/* prepare data to send out. overlap with the above commmunication */
d_send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_DEVICE);
if (want_data)
{
d_send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_DEVICE);
}
/* lazily mirror the offd column map on the device (cached on A) */
if (d_col_map_offd_A == NULL)
{
d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A;
}
/* job == 2, d_send_i is input that contains row ptrs (length num_rows_send) */
hypreDevice_CopyParCSRRows(num_rows_send, d_send_map, 2, num_procs > 1,
first_col, d_col_map_offd_A,
A_diag_i, A_diag_j, A_diag_a,
A_offd_i, A_offd_j, A_offd_a,
d_send_i, d_send_j, d_send_a);
/* pointers to each proc in send_j (per-destination offsets, built on the
 * host from the per-row lengths in send_i) */
send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
send_jstarts[0] = 0;
for (i = 1; i <= num_sends; i++)
{
send_jstarts[i] = send_jstarts[i - 1];
for ( j = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i - 1);
j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
j++ )
{
send_jstarts[i] += send_i[j];
}
}
hypre_assert(send_jstarts[num_sends] == num_nnz_send);
/* finish the above communication: send_i/recv_i */
hypre_ParCSRCommHandleDestroy(comm_handle);
/* adjust recv_i to ptrs (in-place prefix sum of the received lengths) */
recv_i[0] = 0;
for (i = 1; i <= num_rows_recv; i++)
{
recv_i[i] += recv_i[i - 1];
}
num_nnz_recv = recv_i[num_rows_recv];
/* allocate device memory for j and a */
d_recv_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_DEVICE);
if (want_data)
{
d_recv_a = hypre_TAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_DEVICE);
}
/* per-source offsets into the recv buffers */
recv_jstarts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
recv_jstarts[0] = 0;
for (i = 1; i <= num_recvs; i++)
{
j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
recv_jstarts[i] = recv_i[j];
}
/* ready to send and recv: create a communication package for data */
comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm (comm_pkg_j) = comm;
hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends;
hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts;
hypre_ParCSRCommPkgNumRecvs (comm_pkg_j) = num_recvs;
hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts;
/* init communication (device buffers on both sides) */
/* ja */
comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j,
HYPRE_MEMORY_DEVICE, d_send_j,
HYPRE_MEMORY_DEVICE, d_recv_j);
if (want_data)
{
/* a */
comm_handle_a = hypre_ParCSRCommHandleCreate_v2(1, comm_pkg_j,
HYPRE_MEMORY_DEVICE, d_send_a,
HYPRE_MEMORY_DEVICE, d_recv_a);
}
else
{
comm_handle_a = NULL;
}
hypre_TMemcpy(d_recv_i, recv_i, HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE,
HYPRE_MEMORY_HOST);
/* create A_ext: on device (j/a still arriving; Wait() completes them) */
A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv);
hypre_CSRMatrixI (A_ext) = d_recv_i;
hypre_CSRMatrixBigJ(A_ext) = d_recv_j;
hypre_CSRMatrixData(A_ext) = d_recv_a;
hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_DEVICE;
/* output */
vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST);
vrequest[0] = (void *) comm_handle_j;
vrequest[1] = (void *) comm_handle_a;
vrequest[2] = (void *) A_ext;
*request_ptr = (void *) vrequest;
/* free (d_send_j/d_send_a stay alive inside the comm handles and are
 * released by the Wait() phase) */
hypre_TFree(send_i, HYPRE_MEMORY_HOST);
hypre_TFree(recv_i, HYPRE_MEMORY_HOST);
hypre_TFree(d_send_i, HYPRE_MEMORY_DEVICE);
hypre_TFree(d_send_map, HYPRE_MEMORY_DEVICE);
hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
hypre_CSRMatrix*
hypre_ParcsrGetExternalRowsDeviceWait(void *vrequest)
{
void **request = (void **) vrequest;
hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2];
HYPRE_BigInt *send_j = comm_handle_j ? (HYPRE_BigInt *)
hypre_ParCSRCommHandleSendData(comm_handle_j) : NULL;
HYPRE_Complex *send_a = comm_handle_a ? (HYPRE_Complex *)
hypre_ParCSRCommHandleSendData(comm_handle_a) : NULL;
hypre_ParCSRCommHandleDestroy(comm_handle_j);
hypre_ParCSRCommHandleDestroy(comm_handle_a);
hypre_TFree(send_j, HYPRE_MEMORY_DEVICE);
hypre_TFree(send_a, HYPRE_MEMORY_DEVICE);
hypre_TFree(request, HYPRE_MEMORY_HOST);
return A_ext;
}
hypre_CSRMatrix*
hypre_MergeDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
    /* Merge the diag and offd parts of the distributed matrix A into one
     * device CSR matrix B that uses global (big) column indices. */
    MPI_Comm comm = hypre_ParCSRMatrixComm(A);

    /* diag part of A */
    hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
    HYPRE_Complex   *A_diag_a = hypre_CSRMatrixData(A_diag);
    HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
    HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

    /* off-diag part of A */
    hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
    HYPRE_Complex   *A_offd_a = hypre_CSRMatrixData(A_offd);
    HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
    HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

    HYPRE_Int     local_num_rows   = hypre_CSRMatrixNumRows(A_diag);
    HYPRE_BigInt  global_num_cols  = hypre_ParCSRMatrixGlobalNumCols(A);
    HYPRE_BigInt  first_col        = hypre_ParCSRMatrixFirstColDiag(A);
    HYPRE_Int     num_cols_A_offd  = hypre_CSRMatrixNumCols(A_offd);
    HYPRE_BigInt *col_map_offd_A   = hypre_ParCSRMatrixColMapOffd(A);
    HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);

    HYPRE_Int num_procs;
    hypre_MPI_Comm_size(comm, &num_procs);

    /* row pointer of B: per-row nnz of diag + offd, then inclusive scan */
    HYPRE_Int    B_nrows = local_num_rows;
    HYPRE_BigInt B_ncols = global_num_cols;
    HYPRE_Int   *B_i = hypre_TAlloc(HYPRE_Int, B_nrows + 1, HYPRE_MEMORY_DEVICE);
    hypre_Memset(B_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE);
    hypreDevice_GetRowNnz(B_nrows, NULL, A_diag_i, A_offd_i, B_i + 1);
    hypreDevice_IntegerInclusiveScan(B_nrows + 1, B_i);

    /* total number of nonzeros of B (device -> host copy of last entry) */
    HYPRE_Int B_nnz;
    hypre_TMemcpy(&B_nnz, B_i + B_nrows, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

    HYPRE_BigInt  *B_j = hypre_TAlloc(HYPRE_BigInt, B_nnz, HYPRE_MEMORY_DEVICE);
    HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);

    /* lazily mirror the offd column map on the device (cached on A) */
    if (d_col_map_offd_A == NULL)
    {
        d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
        hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
                      HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
        hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A;
    }

    /* fill B_j (global indices) and B_a row by row from diag and offd */
    hypreDevice_CopyParCSRRows(B_nrows, NULL, 2, num_procs > 1, first_col, d_col_map_offd_A,
                               A_diag_i, A_diag_j, A_diag_a, A_offd_i, A_offd_j, A_offd_a,
                               B_i, B_j, B_a);

    /* assemble the output matrix */
    hypre_CSRMatrix *B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz);
    hypre_CSRMatrixI   (B) = B_i;
    hypre_CSRMatrixBigJ(B) = B_j;
    hypre_CSRMatrixData(B) = B_a;
    hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;

    hypre_SyncCudaComputeStream(hypre_handle());

    return B;
}
/* Initiate the exchange of external rows: each rank sends the rows it holds in
 * B_ext (device CSR with big-J column indices) back through the communication
 * pattern of comm_pkg_A *in reverse* (recv side of A becomes send side here).
 * The row-size exchange is completed synchronously in this routine; the column
 * index and (optionally) value exchanges are left in flight and must be
 * completed by hypre_ExchangeExternalRowsDeviceWait() on the returned request.
 *
 * B_ext       [in]  rows to be sent away (device memory, big-J indices)
 * comm_pkg_A  [in]  communication package whose send/recv roles are reversed
 * want_data   [in]  nonzero => matrix values are exchanged as well
 * request_ptr [out] opaque request: {comm_handle_j, comm_handle_a, B_int_d}
 */
HYPRE_Int
hypre_ExchangeExternalRowsDeviceInit( hypre_CSRMatrix *B_ext,
                                      hypre_ParCSRCommPkg *comm_pkg_A,
                                      HYPRE_Int want_data,
                                      void **request_ptr)
{
   MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_A);
   HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
   HYPRE_Int num_elmts_send = send_map_starts[num_sends];
   HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs];

   /* outgoing matrix (device) */
   HYPRE_Int *B_ext_i_d = hypre_CSRMatrixI(B_ext);
   HYPRE_BigInt *B_ext_j_d = hypre_CSRMatrixBigJ(B_ext);
   HYPRE_Complex *B_ext_a_d = hypre_CSRMatrixData(B_ext);
   HYPRE_Int B_ext_ncols = hypre_CSRMatrixNumCols(B_ext);
   HYPRE_Int B_ext_nrows = hypre_CSRMatrixNumRows(B_ext);
   HYPRE_Int B_ext_nnz = hypre_CSRMatrixNumNonzeros(B_ext);
   HYPRE_Int *B_ext_rownnz_d = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_Int *B_ext_rownnz_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST);
   HYPRE_Int *B_ext_i_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_HOST);

   /* one external row per received element of A's comm pattern */
   hypre_assert(num_elmts_recv == B_ext_nrows);

   /* output matrix */
   hypre_CSRMatrix *B_int_d;
   HYPRE_Int B_int_nrows = num_elmts_send;
   HYPRE_Int B_int_ncols = B_ext_ncols;
   HYPRE_Int *B_int_i_h = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
   HYPRE_Int *B_int_i_d = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_BigInt *B_int_j_d = NULL;
   HYPRE_Complex *B_int_a_d = NULL;
   HYPRE_Int B_int_nnz;
   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   hypre_ParCSRCommPkg *comm_pkg_j;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;
   HYPRE_Int i;
   HYPRE_Int num_procs, my_id;
   void **vrequest;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
   /*--------------------------------------------------------------------------
    * B_ext_rownnz contains the number of elements of row j
    * (to be determined through send_map_elmnts on the receiving end)
    *--------------------------------------------------------------------------*/
   /* adjacent differences of the row pointer = per-row nnz (entry 0 is a dummy) */
   HYPRE_THRUST_CALL(adjacent_difference, B_ext_i_d, B_ext_i_d + B_ext_nrows + 1, B_ext_rownnz_d);
   hypre_TMemcpy(B_ext_rownnz_h, B_ext_rownnz_d + 1, HYPRE_Int, B_ext_nrows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
   /*--------------------------------------------------------------------------
    * initialize communication: send/recv the row nnz
    * (note the use of comm_pkg_A, mode 12, as in transpose matvec
    *--------------------------------------------------------------------------*/
   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz_h, B_int_i_h + 1);

   /* while the row sizes travel: build a host copy of B_ext's row pointer and
    * the per-process j-data offsets on the (reversed) send side */
   jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts[0] = 0;
   B_ext_i_h[0] = 0;
   hypre_TMemcpy(B_ext_i_h + 1, B_ext_rownnz_h, HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST,
                 HYPRE_MEMORY_HOST);
   for (i = 1; i <= B_ext_nrows; i++)
   {
      B_ext_i_h[i] += B_ext_i_h[i - 1];
   }
   hypre_assert(B_ext_i_h[B_ext_nrows] == B_ext_nnz);
   for (i = 1; i <= num_recvs; i++)
   {
      jdata_recv_vec_starts[i] = B_ext_i_h[recv_vec_starts[i]];
   }

   /* comm package for the j/data exchange: roles of send/recv are swapped
    * relative to comm_pkg_A */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_j) = comm;
   hypre_ParCSRCommPkgNumSends(comm_pkg_j) = num_recvs;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_j) = num_sends;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs;

   /* finish the row-size exchange before using B_int_i_h */
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /*--------------------------------------------------------------------------
    * compute B_int: row nnz to row ptrs
    *--------------------------------------------------------------------------*/
   B_int_i_h[0] = 0;
   for (i = 1; i <= B_int_nrows; i++)
   {
      B_int_i_h[i] += B_int_i_h[i - 1];
   }
   B_int_nnz = B_int_i_h[B_int_nrows];
   B_int_j_d = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_DEVICE);
   if (want_data)
   {
      B_int_a_d = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_DEVICE);
   }
   for (i = 0; i <= num_sends; i++)
   {
      jdata_send_map_starts[i] = B_int_i_h[send_map_starts[i]];
   }
   /* note the order of send/recv is reversed */
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts;
   /* send/recv CSR rows: these handles stay open until the Wait routine */
   if (want_data)
   {
      comm_handle_a = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg_j,
                                                       HYPRE_MEMORY_DEVICE, B_ext_a_d,
                                                       HYPRE_MEMORY_DEVICE, B_int_a_d );
   }
   else
   {
      comm_handle_a = NULL;
   }
   comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j,
                                                   HYPRE_MEMORY_DEVICE, B_ext_j_d,
                                                   HYPRE_MEMORY_DEVICE, B_int_j_d );
   hypre_TMemcpy(B_int_i_d, B_int_i_h, HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_DEVICE,
                 HYPRE_MEMORY_HOST);
   /* create CSR: on device */
   B_int_d = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz);
   hypre_CSRMatrixI(B_int_d) = B_int_i_d;
   hypre_CSRMatrixBigJ(B_int_d) = B_int_j_d;
   hypre_CSRMatrixData(B_int_d) = B_int_a_d;
   hypre_CSRMatrixMemoryLocation(B_int_d) = HYPRE_MEMORY_DEVICE;
   /* output: the Wait routine unpacks and frees this request */
   vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) B_int_d;
   *request_ptr = (void *) vrequest;
   /* free scratch; comm_pkg_j's starts arrays are freed here because the
    * comm handles keep their own references until destroyed */
   hypre_TFree(B_ext_rownnz_d, HYPRE_MEMORY_DEVICE);
   hypre_TFree(B_ext_rownnz_h, HYPRE_MEMORY_HOST);
   hypre_TFree(B_ext_i_h, HYPRE_MEMORY_HOST);
   hypre_TFree(B_int_i_h, HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/* Complete the row exchange started by hypre_ExchangeExternalRowsDeviceInit:
 * wait on the outstanding j/data communications, release the request, and
 * hand the received device CSR matrix (ownership transfers to the caller). */
hypre_CSRMatrix*
hypre_ExchangeExternalRowsDeviceWait(void *vrequest)
{
   void **req = (void **) vrequest;

   /* unpack the request built by the Init routine */
   hypre_ParCSRCommHandle *handle_j = (hypre_ParCSRCommHandle *) req[0];
   hypre_ParCSRCommHandle *handle_a = (hypre_ParCSRCommHandle *) req[1];
   hypre_CSRMatrix        *B_int_d  = (hypre_CSRMatrix *)        req[2];

   /* block until both exchanges have finished */
   hypre_ParCSRCommHandleDestroy(handle_j);
   hypre_ParCSRCommHandleDestroy(handle_a);

   hypre_TFree(req, HYPRE_MEMORY_HOST);

   return B_int_d;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* Begin extraction of the external rows of B needed by this rank according to
 * A's communication pattern (device version). Creates A's comm package on
 * demand and forwards to hypre_ParcsrGetExternalRowsDeviceInit; complete with
 * hypre_ParCSRMatrixExtractBExtDeviceWait on *request_ptr. */
HYPRE_Int
hypre_ParCSRMatrixExtractBExtDeviceInit( hypre_ParCSRMatrix *B,
                                         hypre_ParCSRMatrix *A,
                                         HYPRE_Int want_data,
                                         void **request_ptr)
{
   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);

   /* diag and offd of B must live in the same memory space */
   hypre_assert( hypre_CSRMatrixMemoryLocation(B_diag) ==
                 hypre_CSRMatrixMemoryLocation(B_offd) );

   /* lazily build A's communication package */
   if (hypre_ParCSRMatrixCommPkg(A) == NULL)
   {
      hypre_MatvecCommPkgCreate(A);
   }

   hypre_ParcsrGetExternalRowsDeviceInit(B,
                                         hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
                                         hypre_ParCSRMatrixColMapOffd(A),
                                         hypre_ParCSRMatrixCommPkg(A),
                                         want_data,
                                         request_ptr);
   return hypre_error_flag;
}
/* Finish the extraction started by hypre_ParCSRMatrixExtractBExtDeviceInit
 * and return the assembled external-rows matrix (caller owns it). */
hypre_CSRMatrix*
hypre_ParCSRMatrixExtractBExtDeviceWait(void *request)
{
   hypre_CSRMatrix *B_ext = hypre_ParcsrGetExternalRowsDeviceWait(request);

   return B_ext;
}
/* Blocking convenience wrapper: extract the external rows of B required by
 * A's communication pattern in one call (Init immediately followed by Wait). */
hypre_CSRMatrix*
hypre_ParCSRMatrixExtractBExtDevice( hypre_ParCSRMatrix *B,
                                     hypre_ParCSRMatrix *A,
                                     HYPRE_Int want_data )
{
   void *request = NULL;

   hypre_ParCSRMatrixExtractBExtDeviceInit(B, A, want_data, &request);

   return hypre_ParCSRMatrixExtractBExtDeviceWait(request);
}
/* return B = [Adiag, Aoffd] */
#if 1
/* Kernel: concatenate diag and offd parts row-wise into one CSR output.
 * One warp processes one row: the diag entries are copied first, then the offd
 * entries, whose column indices are shifted past the diag columns (and
 * optionally remapped through cols_offd_map).
 *
 * nrows         number of rows to process
 * diag_ncol     number of diag columns; offset added to every offd column
 * d_diag_*      diag CSR arrays (i/j/data)
 * d_offd_*      offd CSR arrays (i/j/data)
 * cols_offd_map optional remap of offd column indices (NULL = identity)
 * d_ib          output row pointer (precomputed; read only here)
 * d_jb, d_ab    output column indices and values
 */
__global__ void
hypreCUDAKernel_ConcatDiagAndOffd(HYPRE_Int nrows, HYPRE_Int diag_ncol,
                                  HYPRE_Int *d_diag_i, HYPRE_Int *d_diag_j, HYPRE_Complex *d_diag_a,
                                  HYPRE_Int *d_offd_i, HYPRE_Int *d_offd_j, HYPRE_Complex *d_offd_a,
                                  HYPRE_Int *cols_offd_map,
                                  HYPRE_Int *d_ib, HYPRE_Int *d_jb, HYPRE_Complex *d_ab)
{
   const HYPRE_Int row = hypre_cuda_get_grid_warp_id<1, 1>();
   if (row >= nrows)
   {
      return;
   }
   /* lane id inside the warp */
   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int i, j, k, p, istart, iend, bstart;
   /* diag part: lanes 0/1 load the row's [start,end) and lane 0 the output
    * offset; values are then broadcast to the whole warp via shuffles */
   if (lane_id < 2)
   {
      j = read_only_load(d_diag_i + row + lane_id);
   }
   if (lane_id == 0)
   {
      k = read_only_load(d_ib + row);
   }
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);
   bstart = __shfl_sync(HYPRE_WARP_FULL_MASK, k, 0);
   /* p maps a diag position i to its output position p + i */
   p = bstart - istart;
   for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE)
   {
      d_jb[p + i] = read_only_load(d_diag_j + i);
      d_ab[p + i] = read_only_load(d_diag_a + i);
   }
   /* offd part: output continues right after the diag entries of this row */
   if (lane_id < 2)
   {
      j = read_only_load(d_offd_i + row + lane_id);
   }
   bstart += iend - istart;
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);
   p = bstart - istart;
   for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE)
   {
      const HYPRE_Int t = read_only_load(d_offd_j + i);
      /* offd columns come after the diag columns in the concatenated matrix */
      d_jb[p + i] = (cols_offd_map ? read_only_load(&cols_offd_map[t]) : t) + diag_ncol;
      d_ab[p + i] = read_only_load(d_offd_a + i);
   }
}
/* Return B = [Adiag, Aoffd] as a single device CSR matrix: B has the same rows
 * as A, its columns are A's diag columns followed by A's offd columns, and the
 * offd column indices are kept local (no global remap; cols_offd_map = NULL).
 * Caller owns the returned matrix. */
hypre_CSRMatrix*
hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *B = hypre_CSRMatrixCreate( hypre_CSRMatrixNumRows(A_diag),
                                               hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd),
                                               hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) );
   hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE);
   /* per-row nnz counts (diag + offd) written into B's row pointer ... */
   hypreDevice_GetRowNnz(hypre_CSRMatrixNumRows(B), NULL, hypre_CSRMatrixI(A_diag),
                         hypre_CSRMatrixI(A_offd), hypre_CSRMatrixI(B));
   /* ... then turned into offsets in place; the exclusive scan makes entry
    * nrows equal to the total nnz regardless of its (uninitialized) input */
   HYPRE_THRUST_CALL( exclusive_scan,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixI(B) + hypre_CSRMatrixNumRows(B) + 1,
                      hypre_CSRMatrixI(B) );
   const dim3 bDim = hypre_GetDefaultCUDABlockDimension();
   const dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim);
   /* fill B's j/data row by row (one warp per row) */
   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd,
                      gDim, bDim,
                      hypre_CSRMatrixNumRows(A_diag),
                      hypre_CSRMatrixNumCols(A_diag),
                      hypre_CSRMatrixI(A_diag),
                      hypre_CSRMatrixJ(A_diag),
                      hypre_CSRMatrixData(A_diag),
                      hypre_CSRMatrixI(A_offd),
                      hypre_CSRMatrixJ(A_offd),
                      hypre_CSRMatrixData(A_offd),
                      NULL,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixJ(B),
                      hypre_CSRMatrixData(B) );
   return B;
}
#else
/* Alternative (thrust-based) implementation of B = [Adiag, Aoffd]: build COO
 * triples for diag then offd, stable-sort by row to interleave them with diag
 * entries first within each row, and convert back to CSR. Caller owns B. */
hypre_CSRMatrix*
hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd);
   hypre_CSRMatrix *B;
   HYPRE_Int B_nrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int B_ncols = hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz;
   /* COO buffers: row indices, column indices, values */
   HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);
   // Adiag: copy (row, col, val) triples into the first A_diag_nnz slots
   HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_diag_nnz, A_diag_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)),
                      A_diag_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) );
   hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE);
   // Aoffd: rows/values copied directly; columns handled separately below
   HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_offd_nnz, A_offd_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)),
                      A_offd_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz );
   hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE);
   /* offd columns are placed after the diag columns in B */
   HYPRE_THRUST_CALL( transform,
                      A_offd_j,
                      A_offd_j + A_offd_nnz,
                      thrust::make_constant_iterator(hypre_CSRMatrixNumCols(A_diag)),
                      B_j + A_diag_nnz,
                      thrust::plus<HYPRE_Int>() );
   // B: stable sort keeps diag entries before offd entries within each row
   HYPRE_THRUST_CALL( stable_sort_by_key,
                      B_ii,
                      B_ii + B_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) );
   HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(B_nrows, B_nnz, B_ii);
   hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE);
   B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz);
   hypre_CSRMatrixI(B) = B_i;
   hypre_CSRMatrixJ(B) = B_j;
   hypre_CSRMatrixData(B) = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;
   return B;
}
#endif
/* return B = [Adiag, Aoffd; E] */
#if 1
/* Return B = [Adiag, Aoffd; E]: stack the external rows E (big-J indices)
 * under the local rows of A. E is first split into its diag/offd parts
 * relative to A's column partition, producing the merged offd column map that
 * B's offd columns are numbered by.
 *
 * A                 [in]  local matrix
 * E                 [in]  external rows (big-J column indices)
 * B_ptr             [out] concatenated device CSR matrix (caller owns)
 * num_cols_offd_ptr [out] number of offd columns of the merged map
 * cols_map_offd_ptr [out] merged offd column map (device; caller owns)
 */
HYPRE_Int
hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A,
                                 hypre_CSRMatrix *E,
                                 hypre_CSRMatrix **B_ptr,
                                 HYPRE_Int *num_cols_offd_ptr,
                                 HYPRE_BigInt **cols_map_offd_ptr)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *E_diag, *E_offd, *B;
   HYPRE_Int *cols_offd_map, num_cols_offd;
   HYPRE_BigInt *cols_map_offd;
   /* split E by A's owned column range; cols_offd_map remaps A's old offd
    * column indices into the merged (A+E) offd numbering */
   hypre_CSRMatrixSplitDevice(E, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A),
                              hypre_CSRMatrixNumCols(A_offd), hypre_ParCSRMatrixDeviceColMapOffd(A),
                              &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag, &E_offd);
   B = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E),
                             hypre_ParCSRMatrixNumCols(A) + num_cols_offd,
                             hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) +
                             hypre_CSRMatrixNumNonzeros(E));
   hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE);
   /* row pointer for the A-rows of B: per-row counts, then exclusive scan */
   hypreDevice_GetRowNnz(hypre_ParCSRMatrixNumRows(A), NULL, hypre_CSRMatrixI(A_diag),
                         hypre_CSRMatrixI(A_offd), hypre_CSRMatrixI(B));
   HYPRE_THRUST_CALL( exclusive_scan,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      hypre_CSRMatrixI(B) );
   dim3 bDim = hypre_GetDefaultCUDABlockDimension();
   dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_ParCSRMatrixNumRows(A), "warp", bDim);
   /* fill the A-rows of B; offd columns are remapped into the merged map */
   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd,
                      gDim, bDim,
                      hypre_CSRMatrixNumRows(A_diag),
                      hypre_CSRMatrixNumCols(A_diag),
                      hypre_CSRMatrixI(A_diag),
                      hypre_CSRMatrixJ(A_diag),
                      hypre_CSRMatrixData(A_diag),
                      hypre_CSRMatrixI(A_offd),
                      hypre_CSRMatrixJ(A_offd),
                      hypre_CSRMatrixData(A_offd),
                      cols_offd_map,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixJ(B),
                      hypre_CSRMatrixData(B) );
   hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE);
   /* row pointer for the E-rows of B: copy E's offsets and shift them past
    * all of A's nonzeros */
   hypre_TMemcpy(hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(E) + 1,
                 HYPRE_Int, hypre_CSRMatrixNumRows(E),
                 HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
   HYPRE_THRUST_CALL( transform,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E) + 1,
                      thrust::make_constant_iterator(hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(
                                                        A_offd)),
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      thrust::plus<HYPRE_Int>() );
   gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(E), "warp", bDim);
   hypre_assert(hypre_CSRMatrixNumCols(E_diag) == hypre_CSRMatrixNumCols(A_diag));
   /* fill the E-rows of B (E's offd columns already use the merged map) */
   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd,
                      gDim, bDim,
                      hypre_CSRMatrixNumRows(E_diag),
                      hypre_CSRMatrixNumCols(E_diag),
                      hypre_CSRMatrixI(E_diag),
                      hypre_CSRMatrixJ(E_diag),
                      hypre_CSRMatrixData(E_diag),
                      hypre_CSRMatrixI(E_offd),
                      hypre_CSRMatrixJ(E_offd),
                      hypre_CSRMatrixData(E_offd),
                      NULL,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A),
                      hypre_CSRMatrixJ(B),
                      hypre_CSRMatrixData(B) );
   hypre_CSRMatrixDestroy(E_diag);
   hypre_CSRMatrixDestroy(E_offd);
   *B_ptr = B;
   *num_cols_offd_ptr = num_cols_offd;
   *cols_map_offd_ptr = cols_map_offd;
   return hypre_error_flag;
}
#else
/* Alternative (thrust-based) implementation of B = [Adiag, Aoffd; E]: build
 * one big COO triple list (A's diag, A's offd, E's split entries), shift E's
 * row indices below A's rows, shift all offd columns past the diag columns,
 * stable-sort by row, and convert to CSR. Outputs as in the primary version. */
HYPRE_Int
hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A,
                                 hypre_CSRMatrix *E,
                                 hypre_CSRMatrix **B_ptr,
                                 HYPRE_Int *num_cols_offd_ptr,
                                 HYPRE_BigInt **cols_map_offd_ptr)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int A_nrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int A_ncols = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd);
   HYPRE_BigInt first_col_A = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt last_col_A = hypre_ParCSRMatrixLastColDiag(A);
   HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);
   HYPRE_Int *E_i = hypre_CSRMatrixI(E);
   HYPRE_BigInt *E_bigj = hypre_CSRMatrixBigJ(E);
   HYPRE_Complex *E_a = hypre_CSRMatrixData(E);
   HYPRE_Int E_nrows = hypre_CSRMatrixNumRows(E);
   HYPRE_Int E_nnz = hypre_CSRMatrixNumNonzeros(E);
   HYPRE_Int E_diag_nnz, E_offd_nnz;
   hypre_CSRMatrix *B;
   HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz + E_nnz;
   /* COO buffers: E's entries go after all of A's entries */
   HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);
   // E: pass 0 only counts the diag/offd split sizes
   hypre_CSRMatrixSplitDevice_core(0, E_nrows, E_nnz, NULL, E_bigj, NULL, NULL, first_col_A,
                                   last_col_A, num_cols_offd_A,
                                   NULL, NULL, NULL, NULL, &E_diag_nnz, NULL, NULL, NULL, NULL, &E_offd_nnz,
                                   NULL, NULL, NULL, NULL);
   HYPRE_Int *cols_offd_map, num_cols_offd;
   HYPRE_BigInt *cols_map_offd;
   HYPRE_Int *E_ii = hypreDevice_CsrRowPtrsToIndices(E_nrows, E_nnz, E_i);
   /* pass 1 writes E's split triples directly into the tail of the COO
    * buffers and produces the merged offd map + remap for A's offd columns */
   hypre_CSRMatrixSplitDevice_core(1,
                                   E_nrows, E_nnz, E_ii, E_bigj, E_a, NULL,
                                   first_col_A, last_col_A, num_cols_offd_A, col_map_offd_A,
                                   &cols_offd_map, &num_cols_offd, &cols_map_offd,
                                   &E_diag_nnz,
                                   B_ii + A_diag_nnz + A_offd_nnz,
                                   B_j + A_diag_nnz + A_offd_nnz,
                                   B_a + A_diag_nnz + A_offd_nnz,
                                   NULL,
                                   &E_offd_nnz,
                                   B_ii + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                                   B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                                   B_a + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                                   NULL);
   hypre_TFree(E_ii, HYPRE_MEMORY_DEVICE);
   /* E's rows sit below A's rows in B */
   HYPRE_THRUST_CALL( transform,
                      B_ii + A_diag_nnz + A_offd_nnz,
                      B_ii + B_nnz,
                      thrust::make_constant_iterator(A_nrows),
                      B_ii + A_diag_nnz + A_offd_nnz,
                      thrust::plus<HYPRE_Int>() );
   // Adiag: copy (row, col, val) triples
   HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_diag_nnz, A_diag_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)),
                      A_diag_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) );
   hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE);
   // Aoffd: rows/values copied; columns remapped through cols_offd_map below
   HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_offd_nnz, A_offd_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)),
                      A_offd_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz );
   hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE);
   HYPRE_THRUST_CALL( gather,
                      A_offd_j,
                      A_offd_j + A_offd_nnz,
                      cols_offd_map,
                      B_j + A_diag_nnz);
   hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE);
   /* shift all offd columns (A's and E's) past the diag columns */
   HYPRE_THRUST_CALL( transform,
                      B_j + A_diag_nnz,
                      B_j + A_diag_nnz + A_offd_nnz,
                      thrust::make_constant_iterator(A_ncols),
                      B_j + A_diag_nnz,
                      thrust::plus<HYPRE_Int>() );
   HYPRE_THRUST_CALL( transform,
                      B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                      B_j + B_nnz,
                      thrust::make_constant_iterator(A_ncols),
                      B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                      thrust::plus<HYPRE_Int>() );
   // B: stable sort by row preserves diag-before-offd ordering in each row
   HYPRE_THRUST_CALL( stable_sort_by_key,
                      B_ii,
                      B_ii + B_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) );
   HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(A_nrows + E_nrows, B_nnz, B_ii);
   hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE);
   B = hypre_CSRMatrixCreate(A_nrows + E_nrows, A_ncols + num_cols_offd, B_nnz);
   hypre_CSRMatrixI(B) = B_i;
   hypre_CSRMatrixJ(B) = B_j;
   hypre_CSRMatrixData(B) = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;
   *B_ptr = B;
   *num_cols_offd_ptr = num_cols_offd;
   *cols_map_offd_ptr = cols_map_offd;
   return hypre_error_flag;
}
#endif
/* Device version of GetRow: return the size and (optionally) the global column
 * indices and values of one locally-owned row of mat. Indices/values are
 * returned through the matrix's cached Rowindices/Rowvalues buffers, which are
 * (re)used until the row is released (marked by the Getrowactive flag).
 *
 * mat     [in]  parallel matrix
 * row     [in]  global row index; must lie in this rank's owned row range
 * size    [out] number of nonzeros in the row
 * col_ind [out] optional: global column indices (points at internal buffer)
 * values  [out] optional: values (points at internal buffer)
 *
 * Returns hypre_error_flag on success, -1 if a row is already active or the
 * row is out of range.
 */
HYPRE_Int
hypre_ParCSRMatrixGetRowDevice( hypre_ParCSRMatrix *mat,
                                HYPRE_BigInt row,
                                HYPRE_Int *size,
                                HYPRE_BigInt **col_ind,
                                HYPRE_Complex **values )
{
   HYPRE_Int nrows, local_row;
   HYPRE_BigInt row_start, row_end;
   hypre_CSRMatrix *Aa;
   hypre_CSRMatrix *Ba;
   if (!mat)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat);
   Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat);
   /* only one row may be "checked out" at a time */
   if (hypre_ParCSRMatrixGetrowactive(mat))
   {
      return (-1);
   }
   hypre_ParCSRMatrixGetrowactive(mat) = 1;
   row_start = hypre_ParCSRMatrixFirstRowIndex(mat);
   row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1;
   nrows = row_end - row_start;
   /* NOTE(review): this early return leaves Getrowactive set to 1, blocking
    * later GetRow calls until RestoreRow — confirm this mirrors the host
    * version's intended behavior */
   if (row < row_start || row >= row_end)
   {
      return (-1);
   }
   local_row = row - row_start;
   /* if buffer is not allocated and some information is requested, allocate buffer with the max row_nnz */
   if ( !hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values) )
   {
      HYPRE_Int max_row_nnz;
      HYPRE_Int *row_nnz = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE);
      /* nnz per row = diag nnz + offd nnz; also yields this row's size */
      hypreDevice_GetRowNnz(nrows, NULL, hypre_CSRMatrixI(Aa), hypre_CSRMatrixI(Ba), row_nnz);
      hypre_TMemcpy(size, row_nnz + local_row, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
      max_row_nnz = HYPRE_THRUST_CALL(reduce, row_nnz, row_nnz + nrows, 0, thrust::maximum<HYPRE_Int>());
      /*
         HYPRE_Int *max_row_nnz_d = HYPRE_THRUST_CALL(max_element, row_nnz, row_nnz + nrows);
         hypre_TMemcpy( &max_row_nnz, max_row_nnz_d,
                        HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE );
      */
      hypre_TFree(row_nnz, HYPRE_MEMORY_DEVICE);
      /* buffers sized for the widest row so any row fits later */
      hypre_ParCSRMatrixRowvalues(mat) =
         (HYPRE_Complex *) hypre_TAlloc(HYPRE_Complex, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat));
      hypre_ParCSRMatrixRowindices(mat) =
         (HYPRE_BigInt *) hypre_TAlloc(HYPRE_BigInt, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat));
   }
   else
   {
      /* buffers exist (or nothing requested): only compute this row's size */
      HYPRE_Int *size_d = hypre_TAlloc(HYPRE_Int, 1, HYPRE_MEMORY_DEVICE);
      hypreDevice_GetRowNnz(1, NULL, hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixI(Ba) + local_row,
                            size_d);
      hypre_TMemcpy(size, size_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
      hypre_TFree(size_d, HYPRE_MEMORY_DEVICE);
   }
   if (col_ind || values)
   {
      /* ensure the offd column map is mirrored on the device */
      if (hypre_ParCSRMatrixDeviceColMapOffd(mat) == NULL)
      {
         hypre_ParCSRMatrixDeviceColMapOffd(mat) =
            hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE);
         hypre_TMemcpy( hypre_ParCSRMatrixDeviceColMapOffd(mat),
                        hypre_ParCSRMatrixColMapOffd(mat),
                        HYPRE_BigInt,
                        hypre_CSRMatrixNumCols(Ba),
                        HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST );
      }
      /* copy this single row (diag + offd, global indices) into the buffers */
      hypreDevice_CopyParCSRRows( 1, NULL, -1, Ba != NULL,
                                  hypre_ParCSRMatrixFirstColDiag(mat),
                                  hypre_ParCSRMatrixDeviceColMapOffd(mat),
                                  hypre_CSRMatrixI(Aa) + local_row,
                                  hypre_CSRMatrixJ(Aa),
                                  hypre_CSRMatrixData(Aa),
                                  hypre_CSRMatrixI(Ba) + local_row,
                                  hypre_CSRMatrixJ(Ba),
                                  hypre_CSRMatrixData(Ba),
                                  NULL,
                                  hypre_ParCSRMatrixRowindices(mat),
                                  hypre_ParCSRMatrixRowvalues(mat) );
   }
   if (col_ind)
   {
      *col_ind = hypre_ParCSRMatrixRowindices(mat);
   }
   if (values)
   {
      *values = hypre_ParCSRMatrixRowvalues(mat);
   }
   hypre_SyncCudaComputeStream(hypre_handle());
   return hypre_error_flag;
}
/* Get element-wise tolerances based on row norms for ParCSRMatrix
* NOTE: Keep the diagonal, i.e. elmt_tol = 0.0 for diagonals
* Output vectors have size nnz:
* elmt_tols_diag[j] = tol * (norm of row i) for j in [ A_diag_i[i] , A_diag_i[i+1] )
* elmt_tols_offd[j] = tol * (norm of row i) for j in [ A_offd_i[i] , A_offd_i[i+1] )
* type == -1, infinity norm,
* 1, 1-norm
* 2, 2-norm
*/
/* Kernel: compute per-entry drop tolerances from row norms (one warp per row).
 * For each row i, elmt_tol = tol * ||row i|| for every entry, except the diag
 * matrix's diagonal entry, which gets 0.0 so it is always kept.
 * The template parameter selects the norm: -1 = infinity, 1 = 1-norm,
 * 2 = 2-norm (see the comment block above this kernel). */
template<HYPRE_Int type>
__global__ void
hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols( HYPRE_Int nrows,
                                                      HYPRE_Real tol,
                                                      HYPRE_Int *A_diag_i,
                                                      HYPRE_Int *A_diag_j,
                                                      HYPRE_Complex *A_diag_a,
                                                      HYPRE_Int *A_offd_i,
                                                      HYPRE_Complex *A_offd_a,
                                                      HYPRE_Real *elmt_tols_diag,
                                                      HYPRE_Real *elmt_tols_offd)
{
   HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1, 1>();
   if (row_i >= nrows)
   {
      return;
   }
   HYPRE_Int lane = hypre_cuda_get_lane_id<1>();
   HYPRE_Int p_diag, p_offd, q_diag, q_offd;
   /* sum row norm over diag part: lanes 0/1 load [start,end), broadcast via
    * shuffles */
   if (lane < 2)
   {
      p_diag = read_only_load(A_diag_i + row_i + lane);
   }
   q_diag = __shfl_sync(HYPRE_WARP_FULL_MASK, p_diag, 1);
   p_diag = __shfl_sync(HYPRE_WARP_FULL_MASK, p_diag, 0);
   HYPRE_Real row_norm_i = 0.0;
   for (HYPRE_Int j = p_diag + lane; j < q_diag; j += HYPRE_WARP_SIZE)
   {
      HYPRE_Complex val = A_diag_a[j];
      if (type == -1)
      {
         row_norm_i = hypre_max(row_norm_i, hypre_cabs(val));
      }
      else if (type == 1)
      {
         row_norm_i += hypre_cabs(val);
      }
      else if (type == 2)
      {
         /* accumulate squares; sqrt applied after the warp reduction */
         row_norm_i += val * val;
      }
   }
   /* sum row norm over offd part */
   if (lane < 2)
   {
      p_offd = read_only_load(A_offd_i + row_i + lane);
   }
   q_offd = __shfl_sync(HYPRE_WARP_FULL_MASK, p_offd, 1);
   p_offd = __shfl_sync(HYPRE_WARP_FULL_MASK, p_offd, 0);
   for (HYPRE_Int j = p_offd + lane; j < q_offd; j += HYPRE_WARP_SIZE)
   {
      HYPRE_Complex val = A_offd_a[j];
      if (type == -1)
      {
         row_norm_i = hypre_max(row_norm_i, hypre_cabs(val));
      }
      else if (type == 1)
      {
         row_norm_i += hypre_cabs(val);
      }
      else if (type == 2)
      {
         row_norm_i += val * val;
      }
   }
   /* allreduce to get the row norm on all threads */
   if (type == -1)
   {
      row_norm_i = warp_allreduce_max(row_norm_i);
   }
   else
   {
      row_norm_i = warp_allreduce_sum(row_norm_i);
   }
   if (type == 2)
   {
      row_norm_i = sqrt(row_norm_i);
   }
   /* set elmt_tols_diag */
   for (HYPRE_Int j = p_diag + lane; j < q_diag; j += HYPRE_WARP_SIZE)
   {
      HYPRE_Int col = A_diag_j[j];
      /* elmt_tol = 0.0 ensures diagonal will be kept */
      if (col == row_i)
      {
         elmt_tols_diag[j] = 0.0;
      }
      else
      {
         elmt_tols_diag[j] = tol * row_norm_i;
      }
   }
   /* set elmt_tols_offd */
   for (HYPRE_Int j = p_offd + lane; j < q_offd; j += HYPRE_WARP_SIZE)
   {
      elmt_tols_offd[j] = tol * row_norm_i;
   }
}
/* drop the entries that are not on the diagonal and smaller than:
* type 0: tol
* type 1: tol*(1-norm of row)
* type 2: tol*(2-norm of row)
* type -1: tol*(infinity norm of row) */
/* Drop small entries of A in place (device). Off-diagonal entries smaller than
 * the threshold are removed; diagonal entries are always kept.
 * type ==  0: compare against tol directly
 * type ==  1: tol * (1-norm of the row)
 * type ==  2: tol * (2-norm of the row)
 * type == -1: tol * (infinity norm of the row)
 * After dropping, offd columns that became empty are squeezed out and both the
 * host and device offd column maps of A are rebuilt accordingly. */
HYPRE_Int
hypre_ParCSRMatrixDropSmallEntriesDevice( hypre_ParCSRMatrix *A,
                                          HYPRE_Complex tol,
                                          HYPRE_Int type)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *h_col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);
   HYPRE_Real *elmt_tols_diag = NULL;
   HYPRE_Real *elmt_tols_offd = NULL;
   /* mirror the host offd column map onto the device if not there yet */
   if (col_map_offd_A == NULL)
   {
      col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(col_map_offd_A, h_col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A;
   }
   /* get elmement-wise tolerances if needed */
   if (type != 0)
   {
      elmt_tols_diag = hypre_TAlloc(HYPRE_Real, hypre_CSRMatrixNumNonzeros(A_diag), HYPRE_MEMORY_DEVICE);
      elmt_tols_offd = hypre_TAlloc(HYPRE_Real, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE);
   }
   dim3 bDim = hypre_GetDefaultCUDABlockDimension();
   dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim);
   /* the norm type is a compile-time template parameter, hence one launch per
    * supported value */
   if (type == -1)
   {
      HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols < -1 >, gDim, bDim,
                         hypre_CSRMatrixNumRows(A_diag), tol, hypre_CSRMatrixI(A_diag),
                         hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd),
                         hypre_CSRMatrixData(A_offd), elmt_tols_diag, elmt_tols_offd);
   }
   if (type == 1)
   {
      HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols<1>, gDim, bDim,
                         hypre_CSRMatrixNumRows(A_diag), tol, hypre_CSRMatrixI(A_diag),
                         hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd),
                         hypre_CSRMatrixData(A_offd), elmt_tols_diag, elmt_tols_offd);
   }
   if (type == 2)
   {
      HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols<2>, gDim, bDim,
                         hypre_CSRMatrixNumRows(A_diag), tol, hypre_CSRMatrixI(A_diag),
                         hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd),
                         hypre_CSRMatrixData(A_offd), elmt_tols_diag, elmt_tols_offd);
   }
   /* drop entries from diag and offd CSR matrices */
   hypre_CSRMatrixDropSmallEntriesDevice(A_diag, tol, elmt_tols_diag);
   hypre_CSRMatrixDropSmallEntriesDevice(A_offd, tol, elmt_tols_offd);
   hypre_ParCSRMatrixSetNumNonzeros(A);
   hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A);
   /* squeeze out zero columns of A_offd: sort+unique the surviving column
    * indices to find which offd columns are still referenced */
   HYPRE_Int *tmp_j, *tmp_end, num_cols_A_offd_new;
   tmp_j = hypre_TAlloc(HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE);
   hypre_TMemcpy(tmp_j, hypre_CSRMatrixJ(A_offd), HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd),
                 HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
   HYPRE_THRUST_CALL( sort,
                      tmp_j,
                      tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) );
   tmp_end = HYPRE_THRUST_CALL( unique,
                                tmp_j,
                                tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) );
   num_cols_A_offd_new = tmp_end - tmp_j;
   hypre_assert(num_cols_A_offd_new <= num_cols_A_offd);
   if (num_cols_A_offd_new < num_cols_A_offd)
   {
      hypre_CSRMatrixNumCols(A_offd) = num_cols_A_offd_new;
      HYPRE_Int *offd_mark = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      HYPRE_BigInt *col_map_offd_A_new = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new,
                                                      HYPRE_MEMORY_DEVICE);
      /* offd_mark[old column] = new (compressed) column index */
      HYPRE_THRUST_CALL( scatter,
                         thrust::counting_iterator<HYPRE_Int>(0),
                         thrust::counting_iterator<HYPRE_Int>(num_cols_A_offd_new),
                         tmp_j,
                         offd_mark );
      /* renumber A_offd's column indices into the compressed numbering */
      HYPRE_THRUST_CALL( gather,
                         hypre_CSRMatrixJ(A_offd),
                         hypre_CSRMatrixJ(A_offd) + hypre_CSRMatrixNumNonzeros(A_offd),
                         offd_mark,
                         hypre_CSRMatrixJ(A_offd) );
      /* compress the device column map to the surviving columns */
      HYPRE_THRUST_CALL( gather,
                         tmp_j,
                         tmp_j + num_cols_A_offd_new,
                         col_map_offd_A,
                         col_map_offd_A_new );
      hypre_TFree(offd_mark, HYPRE_MEMORY_DEVICE);
      hypre_TFree(col_map_offd_A, HYPRE_MEMORY_DEVICE);
      hypre_TFree(h_col_map_offd_A, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A_new;
      /* rebuild the host copy of the map from the new device map */
      hypre_ParCSRMatrixColMapOffd(A) = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new,
                                                     HYPRE_MEMORY_HOST);
      hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(A), col_map_offd_A_new, HYPRE_BigInt,
                    num_cols_A_offd_new,
                    HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
   }
   if (type != 0)
   {
      hypre_TFree(elmt_tols_diag, HYPRE_MEMORY_DEVICE);
      hypre_TFree(elmt_tols_offd, HYPRE_MEMORY_DEVICE);
   }
   hypre_TFree(tmp_j, HYPRE_MEMORY_DEVICE);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixTransposeDevice
*--------------------------------------------------------------------------*/
/* Device (GPU) transpose of a parallel CSR matrix: *AT_ptr = A^T.
 * data != 0 transposes numerical values as well; data == 0 transposes
 * the pattern only.  The offd part of A^T is assembled from rows of
 * A_offd^T exchanged with neighboring ranks via the comm package. */
HYPRE_Int
hypre_ParCSRMatrixTransposeDevice( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix **AT_ptr,
HYPRE_Int data )
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
hypre_CSRMatrix *A_diagT;
hypre_CSRMatrix *AT_offd;
HYPRE_Int num_procs;
HYPRE_Int num_cols_offd_AT = 0;
HYPRE_BigInt *col_map_offd_AT = NULL;
hypre_ParCSRMatrix *AT;
hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);
if (num_procs > 1)
{
void *request;
hypre_CSRMatrix *A_offdT, *Aext;
HYPRE_Int *Aext_ii, *Aext_j, Aext_nnz;
HYPRE_Complex *Aext_data;
HYPRE_BigInt *tmp_bigj;
/* Transpose the offd part locally; its rows must be sent to the owners
 * of the corresponding columns. */
hypre_CSRMatrixTranspose(A_offd, &A_offdT, data);
/* Shift local column indices of A_offdT to global indices (BigJ) by
 * adding this rank's first row index. */
hypre_CSRMatrixBigJ(A_offdT) = hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumNonzeros(A_offdT),
HYPRE_MEMORY_DEVICE);
HYPRE_THRUST_CALL( transform,
hypre_CSRMatrixJ(A_offdT),
hypre_CSRMatrixJ(A_offdT) + hypre_CSRMatrixNumNonzeros(A_offdT),
thrust::make_constant_iterator(hypre_ParCSRMatrixFirstRowIndex(A)),
hypre_CSRMatrixBigJ(A_offdT),
thrust::plus<HYPRE_BigInt>() );
if (!hypre_ParCSRMatrixCommPkg(A))
{
hypre_MatvecCommPkgCreate(A);
}
/* Start the neighbor exchange, overlap it with the diag transpose,
 * then wait for the external rows. */
hypre_ExchangeExternalRowsDeviceInit(A_offdT, hypre_ParCSRMatrixCommPkg(A), data, &request);
hypre_CSRMatrixTranspose(A_diag, &A_diagT, data);
Aext = hypre_ExchangeExternalRowsDeviceWait(request);
hypre_CSRMatrixDestroy(A_offdT);
// Aext contains offd of AT
Aext_nnz = hypre_CSRMatrixNumNonzeros(Aext);
/* Expand Aext's row pointers into explicit row indices, then gather
 * through the send-map so entries land in local row numbering of AT. */
Aext_ii = hypreDevice_CsrRowPtrsToIndices(hypre_CSRMatrixNumRows(Aext), Aext_nnz,
hypre_CSRMatrixI(Aext));
hypre_ParCSRCommPkgCopySendMapElmtsToDevice(hypre_ParCSRMatrixCommPkg(A));
HYPRE_THRUST_CALL( gather,
Aext_ii,
Aext_ii + Aext_nnz,
hypre_ParCSRCommPkgDeviceSendMapElmts(hypre_ParCSRMatrixCommPkg(A)),
Aext_ii );
/* Build col_map_offd_AT = sorted unique global column indices of Aext. */
tmp_bigj = hypre_TAlloc(HYPRE_BigInt, Aext_nnz, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(tmp_bigj, hypre_CSRMatrixBigJ(Aext), HYPRE_BigInt, Aext_nnz, HYPRE_MEMORY_DEVICE,
HYPRE_MEMORY_DEVICE);
HYPRE_THRUST_CALL( sort,
tmp_bigj,
tmp_bigj + Aext_nnz );
HYPRE_BigInt *new_end = HYPRE_THRUST_CALL( unique,
tmp_bigj,
tmp_bigj + Aext_nnz );
num_cols_offd_AT = new_end - tmp_bigj;
col_map_offd_AT = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(col_map_offd_AT, tmp_bigj, HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_DEVICE,
HYPRE_MEMORY_DEVICE);
hypre_TFree(tmp_bigj, HYPRE_MEMORY_DEVICE);
/* Compress global column indices to local ones via binary search in the
 * (sorted) col map. */
Aext_j = hypre_TAlloc(HYPRE_Int, Aext_nnz, HYPRE_MEMORY_DEVICE);
HYPRE_THRUST_CALL( lower_bound,
col_map_offd_AT,
col_map_offd_AT + num_cols_offd_AT,
hypre_CSRMatrixBigJ(Aext),
hypre_CSRMatrixBigJ(Aext) + Aext_nnz,
Aext_j );
/* Steal the data array before destroying Aext so it can be reused in AT_offd. */
Aext_data = hypre_CSRMatrixData(Aext);
hypre_CSRMatrixData(Aext) = NULL;
hypre_CSRMatrixDestroy(Aext);
/* Sort entries by (row, col); the value array tags along only when present. */
if (data)
{
hypreDevice_StableSortByTupleKey(Aext_nnz, Aext_ii, Aext_j, Aext_data, 0);
}
else
{
HYPRE_THRUST_CALL( stable_sort,
thrust::make_zip_iterator(thrust::make_tuple(Aext_ii, Aext_j)),
thrust::make_zip_iterator(thrust::make_tuple(Aext_ii, Aext_j)) + Aext_nnz );
}
AT_offd = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumCols(A), num_cols_offd_AT, Aext_nnz);
hypre_CSRMatrixJ(AT_offd) = Aext_j;
hypre_CSRMatrixData(AT_offd) = Aext_data;
hypre_CSRMatrixInitialize_v2(AT_offd, 0, HYPRE_MEMORY_DEVICE);
hypreDevice_CsrRowIndicesToPtrs_v2(hypre_CSRMatrixNumRows(AT_offd), Aext_nnz, Aext_ii,
hypre_CSRMatrixI(AT_offd));
hypre_TFree(Aext_ii, HYPRE_MEMORY_DEVICE);
}
else
{
/* Single rank: no offd part; only the diag block needs transposing.
 * NOTE(review): this branch calls hypre_CSRMatrixTransposeDevice while the
 * multi-rank branch calls hypre_CSRMatrixTranspose — presumably equivalent
 * on device data; confirm against the hypre CSR transpose dispatch. */
hypre_CSRMatrixTransposeDevice(A_diag, &A_diagT, data);
AT_offd = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumCols(A), 0, 0);
hypre_CSRMatrixInitialize_v2(AT_offd, 0, HYPRE_MEMORY_DEVICE);
}
/* Assemble AT: global dims and row/col starts of A are swapped. */
AT = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumCols(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixColStarts(A),
hypre_ParCSRMatrixRowStarts(A),
num_cols_offd_AT,
hypre_CSRMatrixNumNonzeros(A_diagT),
hypre_CSRMatrixNumNonzeros(AT_offd));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(AT));
hypre_ParCSRMatrixDiag(AT) = A_diagT;
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(AT));
hypre_ParCSRMatrixOffd(AT) = AT_offd;
if (num_cols_offd_AT)
{
/* Keep the device col map and mirror it on the host. */
hypre_ParCSRMatrixDeviceColMapOffd(AT) = col_map_offd_AT;
hypre_ParCSRMatrixColMapOffd(AT) = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST);
hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(AT), col_map_offd_AT, HYPRE_BigInt, num_cols_offd_AT,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
}
*AT_ptr = AT;
return hypre_error_flag;
}
/* Device (GPU) matrix sum: *C_ptr = alpha*A + beta*B for parallel CSR
 * matrices with identical row/column partitions.  The diag parts are added
 * directly; the offd parts are first mapped onto a merged column map. */
HYPRE_Int
hypre_ParCSRMatrixAddDevice( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
HYPRE_Complex beta,
hypre_ParCSRMatrix *B,
hypre_ParCSRMatrix **C_ptr )
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
HYPRE_Int num_cols_offd_C = 0;
HYPRE_BigInt *d_col_map_offd_C = NULL;
HYPRE_Int num_procs;
hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);
hypre_CSRMatrix *C_diag = hypre_CSRMatrixAddDevice(alpha, A_diag, beta, B_diag);
hypre_CSRMatrix *C_offd;
//if (num_cols_offd_A || num_cols_offd_B)
if (num_procs > 1)
{
/* Merge the two offd column maps:
 * concatenate, sort, unique -> col map of C. */
hypre_ParCSRMatrixCopyColMapOffdToDevice(A);
hypre_ParCSRMatrixCopyColMapOffdToDevice(B);
HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A + num_cols_offd_B,
HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(tmp, hypre_ParCSRMatrixDeviceColMapOffd(A), HYPRE_BigInt,
num_cols_offd_A, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(tmp + num_cols_offd_A, hypre_ParCSRMatrixDeviceColMapOffd(B), HYPRE_BigInt,
num_cols_offd_B, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
HYPRE_THRUST_CALL( sort, tmp, tmp + num_cols_offd_A + num_cols_offd_B );
HYPRE_BigInt *new_end = HYPRE_THRUST_CALL( unique, tmp, tmp + num_cols_offd_A + num_cols_offd_B );
num_cols_offd_C = new_end - tmp;
d_col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(d_col_map_offd_C, tmp, HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_DEVICE,
HYPRE_MEMORY_DEVICE);
/* reuse memory of tmp */
/* offd_A2C / offd_B2C map each matrix's local offd column to its
 * position in the merged map (binary search, maps are sorted).
 * NOTE(review): this reinterprets the HYPRE_BigInt buffer as HYPRE_Int;
 * it relies on sizeof(HYPRE_Int)*2 <= sizeof(HYPRE_BigInt)*... holding
 * for the combined length — confirm against hypre's type configuration. */
HYPRE_Int *offd_A2C = (HYPRE_Int *) tmp;
HYPRE_Int *offd_B2C = offd_A2C + num_cols_offd_A;
HYPRE_THRUST_CALL( lower_bound,
d_col_map_offd_C,
d_col_map_offd_C + num_cols_offd_C,
hypre_ParCSRMatrixDeviceColMapOffd(A),
hypre_ParCSRMatrixDeviceColMapOffd(A) + num_cols_offd_A,
offd_A2C );
HYPRE_THRUST_CALL( lower_bound,
d_col_map_offd_C,
d_col_map_offd_C + num_cols_offd_C,
hypre_ParCSRMatrixDeviceColMapOffd(B),
hypre_ParCSRMatrixDeviceColMapOffd(B) + num_cols_offd_B,
offd_B2C );
/* Add the offd parts with column indices translated through the maps. */
HYPRE_Int *C_offd_i, *C_offd_j, nnzC_offd;
HYPRE_Complex *C_offd_a;
hypreDevice_CSRSpAdd( hypre_CSRMatrixNumRows(A_offd),
hypre_CSRMatrixNumRows(B_offd),
num_cols_offd_C,
hypre_CSRMatrixNumNonzeros(A_offd),
hypre_CSRMatrixNumNonzeros(B_offd),
hypre_CSRMatrixI(A_offd),
hypre_CSRMatrixJ(A_offd),
alpha,
hypre_CSRMatrixData(A_offd),
offd_A2C,
hypre_CSRMatrixI(B_offd),
hypre_CSRMatrixJ(B_offd),
beta,
hypre_CSRMatrixData(B_offd),
offd_B2C,
NULL,
&nnzC_offd,
&C_offd_i,
&C_offd_j,
&C_offd_a );
hypre_TFree(tmp, HYPRE_MEMORY_DEVICE);
C_offd = hypre_CSRMatrixCreate(hypre_CSRMatrixNumRows(A_offd), num_cols_offd_C, nnzC_offd);
hypre_CSRMatrixI(C_offd) = C_offd_i;
hypre_CSRMatrixJ(C_offd) = C_offd_j;
hypre_CSRMatrixData(C_offd) = C_offd_a;
hypre_CSRMatrixMemoryLocation(C_offd) = HYPRE_MEMORY_DEVICE;
}
else
{
/* Single rank: C has an empty offd block. */
C_offd = hypre_CSRMatrixCreate(hypre_CSRMatrixNumRows(A_offd), 0, 0);
hypre_CSRMatrixInitialize_v2(C_offd, 0, HYPRE_MEMORY_DEVICE);
}
/* Create ParCSRMatrix C */
hypre_ParCSRMatrix *C = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixGlobalNumCols(A),
hypre_ParCSRMatrixRowStarts(A),
hypre_ParCSRMatrixColStarts(A),
num_cols_offd_C,
hypre_CSRMatrixNumNonzeros(C_diag),
hypre_CSRMatrixNumNonzeros(C_offd));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C));
hypre_ParCSRMatrixDiag(C) = C_diag;
hypre_ParCSRMatrixOffd(C) = C_offd;
if (num_cols_offd_C)
{
/* Keep the device col map and mirror it on the host. */
hypre_ParCSRMatrixDeviceColMapOffd(C) = d_col_map_offd_C;
hypre_ParCSRMatrixColMapOffd(C) = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(C), d_col_map_offd_C, HYPRE_BigInt, num_cols_offd_C,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
}
hypre_ParCSRMatrixSetNumNonzeros(C);
hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C);
/* create CommPkg of C */
hypre_MatvecCommPkgCreate(C);
*C_ptr = C;
return hypre_error_flag;
}
#endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/*--------------------------------------------------------------------------
* HYPRE_ParCSRDiagScale
*--------------------------------------------------------------------------*/
/* Jacobi (diagonal) scaling: x[i] = y[i] / A_diag[i][i].
 * Relies on hypre's CSR convention that A_i[i] points at the diagonal
 * entry of row i in the diag block.  Dispatches to a device kernel when
 * built for CUDA/HIP, otherwise runs a (possibly OpenMP) host loop. */
HYPRE_Int
hypre_ParCSRDiagScale( HYPRE_ParCSRMatrix HA,
HYPRE_ParVector Hy,
HYPRE_ParVector Hx )
{
hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) HA;
hypre_ParVector *y = (hypre_ParVector *) Hy;
hypre_ParVector *x = (hypre_ParVector *) Hx;
HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y));
HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A));
HYPRE_Int local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x));
HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* beta = 0.0: x is overwritten, not accumulated into. */
hypreDevice_DiagScaleVector(local_size, A_i, A_data, y_data, 0.0, x_data);
//hypre_SyncCudaComputeStream(hypre_handle());
#else /* #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(x_data,y_data,A_data,A_i)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < local_size; i++)
{
x_data[i] = y_data[i] / A_data[A_i[i]];
}
#endif /* #if defined(HYPRE_USING_CUDA) */
return ierr;
}
|
DynamicMatrixSpecialization.h | #pragma once
//include cstring for memcpy
#include <cstring>
#include <core.math/core.math.h>
#include <core.math/matrix.dslib/DynamicMatrix.h>
NS_BEGIN(CORE_MATH_NAMESPACE)
// Trait/specialization hooks that register DynamicMatrix<T> with the
// core.math matrix framework (size queries, coefficient type, resize).
// The Specialize*/Begin*/End* identifiers are macros defined by core.math.
template<typename T> SpecializeMatrixDynamicSizeColumns(::core::math::matrix::dslib::DynamicMatrix<T>);
template<typename T> SpecializeMatrixDynamicSizeRows(::core::math::matrix::dslib::DynamicMatrix<T>);
template<typename T> SpecializeMatrixCoefficientType(::core::math::matrix::dslib::DynamicMatrix<T>, T);
// Resize hook; `matrix`, `rowcount` and `colcount` are presumably introduced
// by the Begin/End macro pair — TODO confirm against core.math's definition.
template<typename T>
BeginSpecializeMatrixResize(::core::math::matrix::dslib::DynamicMatrix<T>)
return matrix.resize(rowcount, colcount);
EndSpecializeMatrixResize
template<typename T>
class MatrixAddition < ::core::math::matrix::dslib::DynamicMatrix<T>, ::core::math::matrix::dslib::DynamicMatrix<T>, ::core::math::matrix::dslib::DynamicMatrix<T> > {
public:
	// Element-wise sum: sumMat = aMat + bMat.
	// On dimension mismatch an error is printed and sumMat is left untouched.
	static inline void operation(::core::math::matrix::dslib::DynamicMatrix<T> & sumMat, const ::core::math::matrix::dslib::DynamicMatrix<T> & aMat, const ::core::math::matrix::dslib::DynamicMatrix<T> & bMat){
		const int rows = aMat.rows();
		// BUG FIX: cols was taken from bMat, which made the column check below
		// compare bMat against itself; an aMat/bMat column mismatch was never caught.
		const int cols = aMat.cols();
		if (bMat.rows() != rows || bMat.cols() != cols){
			std::cerr << "matrix addition failed. dimension mismatch" << std::endl;
			return;
		}
		const int size = rows * cols;
		sumMat.resize(rows, cols);
		// Flat element-wise loop over the contiguous data arrays.
		const T* a = aMat.data();
		const T* b = bMat.data();
		T* c = sumMat.data();
		//#pragma omp parallel for
		for (int i = 0; i < size; ++i){
			c[i] = a[i] + b[i];
		}
	}
};
template<typename T>
class MatrixSubtraction < ::core::math::matrix::dslib::DynamicMatrix<T>, ::core::math::matrix::dslib::DynamicMatrix<T>, ::core::math::matrix::dslib::DynamicMatrix<T> > {
public:
	// Element-wise difference: sumMat = aMat - bMat.
	// On dimension mismatch an error is printed and sumMat is left untouched.
	static inline void operation(::core::math::matrix::dslib::DynamicMatrix<T> & sumMat, const ::core::math::matrix::dslib::DynamicMatrix<T> & aMat, const ::core::math::matrix::dslib::DynamicMatrix<T> & bMat){
		const int rows = aMat.rows();
		// BUG FIX: cols was taken from bMat, which made the column check below
		// compare bMat against itself; an aMat/bMat column mismatch was never caught.
		const int cols = aMat.cols();
		if (bMat.rows() != rows || bMat.cols() != cols){
			// BUG FIX: error message said "addition"; this is the subtraction kernel.
			std::cerr << "matrix subtraction failed. dimension mismatch" << std::endl;
			return;
		}
		const int size = rows * cols;
		sumMat.resize(rows, cols);
		const T* a = aMat.data();
		const T* b = bMat.data();
		T* c = sumMat.data();
		//#pragma omp parallel for
		for (int i = 0; i < size; ++i){
			c[i] = a[i] - b[i];
		}
	}
};
template<typename T>
class MatrixSetConstant < T, ::core::math::matrix::dslib::DynamicMatrix<T> > {
public:
	// Fills every element of `target` with `value`.
	// Uses memset for zero (assumes T's all-zero bit pattern equals T(0),
	// which holds for the integral and IEEE floating-point types this is used with).
	static inline void operation(::core::math::matrix::dslib::DynamicMatrix<T> & target, const T & value){
		if (value == 0.0){
			memset(target.data(), 0, target.dataByteSize());
			return;
		}
		T * t = target.data();
		const int s = target.size();
		// CLEANUP: the original had two byte-identical loops behind a size
		// threshold (the parallel pragma was commented out); one loop suffices.
		for (int i = 0; i < s; i++){
			t[i] = value;
		}
	}
};
template<typename T>
class MatrixSetFunction < ::core::math::matrix::dslib::DynamicMatrix<T>, std::function<void(T & val, int i, int j)> > {
public:
	// Applies f(element, row, col) to every element of `target` in row-major order.
	static inline void operation(::core::math::matrix::dslib::DynamicMatrix<T> & target, std::function<void(T & val, int i, int j)> f){
		T * t = target.data();
		const int s = target.size();
		const int cols = target.cols();
		// CLEANUP: the original had two byte-identical loops behind a size
		// threshold (the parallel pragma was commented out) and an unused
		// `rows` local; one loop suffices.
		for (int i = 0; i < s; i++){
			// Flat index i -> (row, col) for a row-major layout.
			f(t[i], i / cols, i % cols);
		}
	}
};
template<typename T>
class MatrixScalarMultiplication < ::core::math::matrix::dslib::DynamicMatrix<T>, ::core::math::matrix::dslib::DynamicMatrix<T>, T > {
public:
	// Scalar product: cMat = b * aMat (element-wise).
	static inline void operation(::core::math::matrix::dslib::DynamicMatrix<T> & cMat, const ::core::math::matrix::dslib::DynamicMatrix<T> & aMat, const T & b){
		const int s = aMat.size();
		cMat.resize(aMat.rows(), aMat.cols());
		const T * a = aMat.data();
		T * c = cMat.data();
		// CLEANUP: the original had two byte-identical loops behind a size
		// threshold (the parallel pragma was commented out); one loop suffices.
		for (int i = 0; i < s; i++){
			c[i] = b * a[i];
		}
	}
};
// Assignment hook: lhs = rhs (deep copy).  `lhs` and `rhs` are presumably
// introduced by the Begin/End macro pair — TODO confirm against core.math.
template<typename T>
BeginSpecializeMatrixAssign(::core::math::matrix::dslib::DynamicMatrix<T>, ::core::math::matrix::dslib::DynamicMatrix<T>)
{
const int rows = rhs.rows();
const int cols = rhs.cols();
lhs.resize(rows, cols);
T * a = lhs.data();
const T * b = rhs.data();
const int size = rows*cols;
// Small matrices: one memcpy of the whole buffer (valid only for
// trivially-copyable T — true for the numeric types this is used with).
if (size < 20000){
memcpy(a, b, lhs.dataByteSize());
return true;
}
// Large matrices: element loop (the parallel pragma is disabled).
//#pragma omp parallel for
for (int i = 0; i < size; i++){
a[i] = b[i];
}
return true;
}
EndSpecializeMatrixAssign;
template<typename T>
class MatrixExtractBlock < ::core::math::matrix::dslib::DynamicMatrix<T>, ::core::math::matrix::dslib::DynamicMatrix<T> > {
public:
	// Copies the rows x cols sub-block of `original` whose top-left corner is
	// (startRow, startCol) into `result`, one memcpy per row.
	// On out-of-range requests an error is printed and `result` is left untouched.
	static inline void operation(::core::math::matrix::dslib::DynamicMatrix<T> & result, const ::core::math::matrix::dslib::DynamicMatrix<T> & original, size_t startRow, size_t startCol, size_t rows, size_t cols){
		const size_t srcCols = original.cols();
		const size_t srcRows = original.rows();
		if (startRow + rows > srcRows){
			std::cerr << __FUNCSIG__ << " rows out of range" << std::endl;
			return;
		}
		if (startCol + cols > srcCols){
			std::cerr << __FUNCSIG__ << " cols out of range" << std::endl;
			return;
		}
		result.resize(rows, cols, false);
		// BUG FIX: the data pointers were typed `Real` instead of the template
		// parameter T, breaking (or silently mis-sizing) non-Real instantiations.
		const T * src = original.data();
		T * dst = result.data();
		const size_t length = sizeof(T) * cols;
		// BUG FIX: startCol was validated but never applied — the copy always
		// started at column 0 of each source row.
		const T * srcOffset = src + startRow * srcCols + startCol;
		for (size_t i = 0; i < rows; i++){
			memcpy(dst + i * cols, srcOffset + i * srcCols, length);
		}
	}
};
template<typename KernelMatrix, typename T>
class MatrixConvolution < ::core::math::matrix::dslib::DynamicMatrix<T>, ::core::math::matrix::dslib::DynamicMatrix<T>, KernelMatrix, T > {
public:
static inline void operation(::core::math::matrix::dslib::DynamicMatrix<T> & rMat, const ::core::math::matrix::dslib::DynamicMatrix<T> & gMat, const KernelMatrix & kernel){
uint fx = kernel.cols();
uint fy = kernel.rows();
uint rx = gMat.cols() - fx + 1;
uint ry = gMat.rows() - fy + 1;
rMat.resize(ry, rx);
uint rCols = rMat.cols();
uint gCols = gMat.cols();
Real * r = rMat.data();
const Real* g = gMat.data();
Real * f = new Real[kernel.size()];
for (int i = 0; i < kernel.size(); i++)f[i] = kernel(i);
const Real * gOffset;
const Real * fOffset;
T sum = 0.0;
for (int i = 0; i < ry; i++){
for (int j = 0; j < rx; j++){
sum = 0.0;
for (int l = 0; l < fy; ++l){
gOffset = g + (i + l)*gCols + j;
fOffset = f + l*fx;
for (int k = 0; k < fx; ++k){
sum = sum + gOffset[k] * fOffset[k];
}
}
//r(i,j)=sum;
r[i*rCols + j] = sum;
}
}
delete f;
}
};
NS_END(CORE_MATH_NAMESPACE)
|
zipmonster_fmt_plug.c | /* This format is reverse engineered from InsidePro Hash Manager!
*
* This software is Copyright (c) 2016, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_zipmonster;
#elif FMT_REGISTERS_H
john_register_one(&fmt_zipmonster);
#else
#include "arch.h"
#include "sha.h"
#include "md5.h"
#include <string.h>
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "simd-intrinsics.h"
//#undef SIMD_COEF_32
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "ZipMonster"
#define FORMAT_NAME "MD5(ZipMonster)"
#define ALGORITHM_NAME "MD5-" MD5_ALGORITHM_NAME " x 50000"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define SALT_SIZE 0
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#ifdef SIMD_COEF_32
#define MAX_KEYS_PER_CRYPT (SIMD_PARA_MD5*SIMD_COEF_32)
#else
#define MAX_KEYS_PER_CRYPT 1
#endif
#define FORMAT_TAG "$zipmonster$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
/* Self-test vectors: one hash with the "$zipmonster$" tag, one bare
 * (split() canonicalizes the latter by prepending the tag). */
static struct fmt_tests zipmonster_tests[] = {
{"$zipmonster$e0f68d6f40c5f157c169e9ca0a6f09fe", "!"},
{"4dac447f100ee85327db2b47e295e50d", "1"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static unsigned short itoa16u_w[256];
#ifdef SIMD_COEF_32
#define GETPOS(i,index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 )
#endif
/* One-time format setup: scale key-buffer counts for OpenMP, allocate the
 * per-candidate key/length/digest arrays, and build the byte -> two
 * uppercase-hex-characters lookup table used by the hot loop. */
static void init(struct fmt_main *self)
{
int i;
char buf[3];
#ifdef _OPENMP
static int omp_t = 1;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
/* itoa16u_w[b] holds the two ASCII hex digits of byte b packed into one
 * uint16_t via memcpy, so the digit order matches the host's byte order
 * and round-trips correctly when written back as raw bytes. */
for (i = 0; i < 256; ++i) {
sprintf(buf, "%X%X", i>>4, i&0xF);
memcpy(&(itoa16u_w[i]), buf, 2);
}
}
/* Releases the buffers allocated in init() (reverse order of allocation). */
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_len);
MEM_FREE(saved_key);
}
/* Accepts a ciphertext that is exactly 32 lowercase hex digits, with an
 * optional "$zipmonster$" prefix.  Returns 1 if valid, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p = ciphertext;
/* Skip the optional format tag. */
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
p = ciphertext + TAG_LENGTH;
/* CLEANUP: dropped the dead "if (!p)" test — p always points into
 * ciphertext.  Check the length first so the hex scan only runs on
 * strings of the right size. */
if (strlen(p) != BINARY_SIZE * 2)
return 0;
if (!ishexlc(p))
return 0;
return 1;
}
/* Canonicalizes a ciphertext by prepending "$zipmonster$" when missing.
 * Returns the input unchanged if it is already tagged.  Note: returns a
 * pointer to a static buffer (standard for john split() callbacks). */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[TAG_LENGTH + 2 * BINARY_SIZE + 1];
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
return ciphertext;
/* FIX: bound the copy — the original strcpy could overflow the static
 * buffer if handed an over-long string before/without validation. */
snprintf(out, sizeof(out), "%s%s", FORMAT_TAG, ciphertext);
return out;
}
/* Decodes the 32 hex digits after the tag into the 16 raw digest bytes.
 * Returns a pointer to a static, word-aligned buffer (overwritten on the
 * next call — standard for john get_binary() callbacks). */
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy; /* forces alignment for the word-wise cmp_all() */
} buf;
unsigned char *out = buf.c;
char *p = ciphertext + TAG_LENGTH;
int i;
/* Two hex chars -> one byte, via john's atoi16 lookup table. */
for (i = 0; i < BINARY_SIZE && *p; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
/* Hash-table bucket functions: successively wider masks of the first
 * 32 bits of the computed digest (john's standard get_hash family). */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#ifndef SIMD_COEF_32
/* Scalar path only: expands a 16-byte digest into 32 uppercase hex
 * characters, two at a time through the itoa16u_w table built in init(). */
inline static void hex_encode_uppercase(unsigned char *str, unsigned char *_out)
{
int i;
unsigned short *out = (unsigned short*)_out;
for (i = 0; i < BINARY_SIZE; ++i) {
out[i] = itoa16u_w[str[i]];
}
}
#endif
/* Core loop: for each candidate, iterate
 *     digest = MD5(uppercase_hex(digest))
 * 50000 times total (one seed MD5 of the key + 49999 re-hashes), matching
 * the ZipMonster scheme.  A SIMD build processes SIMD_COEF_32*SIMD_PARA_MD5
 * lanes per iteration; the scalar build handles one candidate at a time. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
int inc = 1;
#ifdef SIMD_COEF_32
inc = SIMD_COEF_32*SIMD_PARA_MD5;
#endif
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += inc)
{
unsigned char buffer[BINARY_SIZE];
MD5_CTX ctx;
int n = 49999; /* re-hash count after the initial MD5 of the key */
#ifdef SIMD_COEF_32
int j, k;
uint32_t *p, t;
/* Over-sized stack buffers, manually aligned for the SIMD MD5 body. */
uint8_t ib[64 * SIMD_COEF_32 * SIMD_PARA_MD5 + MEM_ALIGN_SIMD];
uint8_t ob[16 * SIMD_COEF_32 * SIMD_PARA_MD5 + MEM_ALIGN_SIMD];
uint8_t *md5 = mem_align(ib, MEM_ALIGN_SIMD);
uint32_t *crypt_buf = mem_align(ob, MEM_ALIGN_SIMD);
memset(md5, 0, 64 * SIMD_COEF_32 * SIMD_PARA_MD5);
/* Seed each SIMD lane: MD5(key), hex-encoded into the interleaved
 * input block, followed by MD5 padding for a fixed 32-byte message. */
for (j = 0; j < SIMD_COEF_32*SIMD_PARA_MD5; ++j) {
uint16_t *op = (uint16_t*)&md5[GETPOS(0, j)];
MD5_Init(&ctx);
MD5_Update(&ctx, saved_key[index+j], strlen(saved_key[index+j]));
MD5_Final(buffer, &ctx);
for (k = 0; k < 16; ++k) {
op[0] = itoa16u_w[buffer[k++]];
op[1] = itoa16u_w[buffer[k]];
op += ((SIMD_COEF_32) << 1);
}
md5[GETPOS(32,j)] = 0x80; /* padding bit after the 32 hex chars */
md5[GETPOS(57,j)] = 1;    /* message length field: 256 bits */
}
#else
unsigned char hex_buffer[BINARY_SIZE * 2];
/* Seed: digest = MD5(key), kept hex-encoded for the loop below. */
MD5_Init(&ctx);
MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
MD5_Final(buffer, &ctx);
hex_encode_uppercase(buffer, hex_buffer);
#endif
do {
#ifdef SIMD_COEF_32
SIMDmd5body(md5, crypt_buf, NULL, SSEi_MIXED_IN);
// upper case hex encode into the next input buffer.
for (j = 0; j < SIMD_PARA_MD5*SIMD_COEF_32; ++j) {
int i;
uint16_t *op = (uint16_t*)&md5[GETPOS(0, j)];
p = &crypt_buf[(j&(SIMD_COEF_32-1))+(4*SIMD_COEF_32*(j/SIMD_COEF_32))];
for (i = 0; i < 4; ++i) {
t = *p;
p += SIMD_COEF_32;
op[0] = itoa16u_w[t&0xFF];
op[1] = itoa16u_w[(t>>8)&0xFF];
t >>= 16;
op += ((SIMD_COEF_32) << 1);
op[0] = itoa16u_w[t&0xFF];
op[1] = itoa16u_w[(t>>8)&0xFF];
op += ((SIMD_COEF_32) << 1);
}
}
#else
/* digest = MD5(hex(digest)) */
MD5_Init(&ctx);
MD5_Update(&ctx, hex_buffer, BINARY_SIZE * 2);
MD5_Final(buffer, &ctx);
hex_encode_uppercase(buffer, hex_buffer);
#endif
--n;
} while (n);
#ifdef SIMD_COEF_32
/* De-interleave the final SIMD digests into crypt_out[]. */
p = crypt_buf;
for (j = 0; j < SIMD_PARA_MD5*SIMD_COEF_32; j+=SIMD_COEF_32) {
for (k = 0; k < SIMD_COEF_32*4; ++k) {
uint32_t J = j+(k&(SIMD_COEF_32-1)), K = (k/SIMD_COEF_32);
crypt_out[index+J][K] = *p++;
}
}
#else
memcpy((unsigned char*)crypt_out[index], buffer, BINARY_SIZE);
#endif
}
return count;
}
/* Fast screen: does any computed digest match the target's first machine
 * word?  Intentionally compares only ARCH_SIZE bytes; cmp_one() does the
 * full 16-byte comparison. */
static int cmp_all(void *binary, int count)
{
int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
/* Full 16-byte comparison of one computed digest against the target. */
static int cmp_one(void *binary, int index)
{
return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* No salt and the full binary is already compared in cmp_one(), so there
 * is nothing further to verify. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Stores a candidate password; strnzcpyn truncates to the buffer size,
 * NUL-terminates, and returns the stored length. */
static void zipmonster_set_key(char *key, int index)
{
saved_len[index] =
strnzcpyn(saved_key[index], key, sizeof(saved_key[index]));
}
/* Returns the stored candidate for reporting (inverse of set_key). */
static char *get_key(int index)
{
return saved_key[index];
}
/* Format descriptor registered with john's core: static parameters first,
 * then the method table wiring up the callbacks defined above.  Entries
 * left as fmt_default_* / NULL use john's generic behavior. */
struct fmt_main fmt_zipmonster = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
{ FORMAT_TAG },
#endif
zipmonster_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
fmt_default_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
zipmonster_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_unop__identity_int32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_uint32)
// op(A') function: GB (_unop_tran__identity_int32_uint32)
// C type: int32_t
// A type: uint32_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = (int32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = (int32_t) Ax: apply the identity operator with a uint32->int32
 * typecast, element-wise, parallelized with OpenMP.  This file is
 * auto-generated; the body mirrors the GB_CAST_OP macro above. */
GrB_Info GB (_unop_apply__identity_int32_uint32)
(
int32_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
/* Dense/full case: every entry is present. */
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = (int32_t) A': typecast-and-transpose.  The whole algorithm lives in
 * the shared template GB_unop_transpose.c, driven by the GB_* macros
 * defined at the top of this generated file. */
GrB_Info GB (_unop_tran__identity_int32_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
displacement_residual_contact_criteria.h | // KRATOS ______ __ __ _____ __ __ __
// / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
// / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
// / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
// \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
//
// License: BSD License
// license: ContactStructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_RESIDUAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_RESIDUAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementResidualContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* This class implements a convergence control based on nodal displacement (for penalty contact)
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementResidualContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementResidualContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementResidualContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED );
KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET );
/// The base class definition
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
/// The definition of the current class
typedef DisplacementResidualContactCriteria< TSparseSpace, TDenseSpace > ClassType;
/// The dofs array type
typedef typename BaseType::DofsArrayType DofsArrayType;
/// The sparse matrix type
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
/// The dense vector type
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
///@}
///@name Life Cycle
///@{
/**
 * @brief Default constructor.
 * @details Leaves all tolerances at their default (zero-initialized) values;
 * use the parameter-based constructors for a configured criterion.
 */
explicit DisplacementResidualContactCriteria()
: BaseType()
{
}
/**
 * @brief Constructor with Kratos Parameters.
 * @param ThisParameters The configuration parameters; they are validated
 * against GetDefaultParameters() and then applied via AssignSettings().
 */
explicit DisplacementResidualContactCriteria(Kratos::Parameters ThisParameters)
: BaseType()
{
// Validate and assign defaults
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
}
/**
 * @brief Constructor with explicit tolerances.
 * @param DispRatioTolerance Relative tolerance for displacement residual error
 * @param DispAbsTolerance Absolute tolerance for displacement residual error
 * @param RotRatioTolerance Relative tolerance for rotation residual error
 * @param RotAbsTolerance Absolute tolerance for rotation residual error
 * @param PrintingOutput If the output is going to be printed in a txt file
 * @note The stale @p pTable parameter mentioned in an earlier revision of
 * this documentation no longer exists in the signature.
 */
explicit DisplacementResidualContactCriteria(
const double DispRatioTolerance,
const double DispAbsTolerance,
const double RotRatioTolerance,
const double RotAbsTolerance,
const bool PrintingOutput = false
)
: BaseType()
{
// Set local flags
mOptions.Set(DisplacementResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput);
mOptions.Set(DisplacementResidualContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
mOptions.Set(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
// The displacement residual
mDispRatioTolerance = DispRatioTolerance;
mDispAbsTolerance = DispAbsTolerance;
// The rotation residual
mRotRatioTolerance = RotRatioTolerance;
mRotAbsTolerance = RotAbsTolerance;
}
/// Copy constructor: copies flags, tolerances, and the cached residual norms.
DisplacementResidualContactCriteria( DisplacementResidualContactCriteria const& rOther )
:BaseType(rOther)
,mOptions(rOther.mOptions)
,mDispRatioTolerance(rOther.mDispRatioTolerance)
,mDispAbsTolerance(rOther.mDispAbsTolerance)
,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
,mRotRatioTolerance(rOther.mRotRatioTolerance)
,mRotAbsTolerance(rOther.mRotAbsTolerance)
,mRotInitialResidualNorm(rOther.mRotInitialResidualNorm)
,mRotCurrentResidualNorm(rOther.mRotCurrentResidualNorm)
{
}
/// Destructor.
~DisplacementResidualContactCriteria() override = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
 * @brief Factory method used by the convergence-criteria registry.
 * @param ThisParameters The configuration parameters
 * @return A shared pointer to a new instance configured from @p ThisParameters
 */
typename BaseType::Pointer Create(Parameters ThisParameters) const override
{
return Kratos::make_shared<ClassType>(ThisParameters);
}
/**
 * @brief Compute relative and absolute error after the solution step.
 * @details Residual norms are accumulated separately for displacement DoFs and
 * (when ROTATION_DOF_IS_CONSIDERED is set) rotation DoFs. Convergence requires
 * every considered DoF family to fulfil either its ratio or its absolute tolerance.
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables) (unused)
 * @param rb RHS vector (residual)
 * @return true if convergence is achieved, false otherwise
 */
bool PostCriteria(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
        // Initialize
        double disp_residual_solution_norm = 0.0;
        IndexType disp_dof_num(0);
        double rot_residual_solution_norm = 0.0;
        IndexType rot_dof_num(0);
        // First iterator
        const auto it_dof_begin = rDofSet.begin();
        // Auxiliar displacement DoF check: when rotation DoFs are considered we must
        // discriminate them; otherwise every free DoF counts as a displacement DoF
        const std::function<bool(const VariableData&)> check_without_rot =
        [](const VariableData& rCurrVar) -> bool {return true;};
        const std::function<bool(const VariableData&)> check_with_rot =
        [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));};
        const auto* p_check_disp = (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot;
        // Loop over Dofs
        // NOTE: dof_id and residual_dof_value are per-iteration scratch values, so they
        // are declared inside the loop body rather than (incorrectly) being summed in
        // the reduction clause; only the true accumulators are reduced
        #pragma omp parallel for reduction(+:disp_residual_solution_norm,disp_dof_num,rot_residual_solution_norm,rot_dof_num)
        for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
            auto it_dof = it_dof_begin + i;
            if (it_dof->IsFree()) {
                const std::size_t dof_id = it_dof->EquationId();
                const double residual_dof_value = rb[dof_id];
                const auto& r_curr_var = it_dof->GetVariable();
                if ((*p_check_disp)(r_curr_var)) {
                    disp_residual_solution_norm += std::pow(residual_dof_value, 2);
                    ++disp_dof_num;
                } else { // We will assume is rotation dof
                    KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl;
                    rot_residual_solution_norm += std::pow(residual_dof_value, 2);
                    ++rot_dof_num;
                }
            }
        }
        mDispCurrentResidualNorm = disp_residual_solution_norm;
        mRotCurrentResidualNorm = rot_residual_solution_norm;
        double residual_disp_ratio = 1.0;
        double residual_rot_ratio = 1.0;
        // We initialize the reference residual (first call after InitializeSolutionStep)
        if (mOptions.IsNot(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
            mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm;
            residual_disp_ratio = 1.0;
            if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                mRotInitialResidualNorm = (rot_residual_solution_norm == 0.0) ? 1.0 : rot_residual_solution_norm;
                residual_rot_ratio = 1.0;
            }
            mOptions.Set(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
        }
        // We calculate the ratio of the displacements
        residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;
        // The rotation ratio is only meaningful -- and mRotInitialResidualNorm is only
        // initialized -- when rotation DoFs are considered
        if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
            residual_rot_ratio = mRotCurrentResidualNorm/mRotInitialResidualNorm;
        }
        // We calculate the absolute norms, guarding against empty DoF families
        // (an empty family has zero residual and is trivially converged)
        const double residual_disp_abs = (disp_dof_num > 0) ? mDispCurrentResidualNorm/static_cast<double>(disp_dof_num) : 0.0;
        const double residual_rot_abs = (rot_dof_num > 0) ? mRotCurrentResidualNorm/static_cast<double>(rot_dof_num) : 0.0;
        // The process info of the model part
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        // We print the results // TODO: Replace for the new log
        if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
            if (r_process_info.Has(TABLE_UTILITY)) {
                std::cout.precision(4);
                TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                auto& r_table = p_table->GetTable();
                if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                    r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_rot_ratio << mRotRatioTolerance << residual_rot_abs << mRotAbsTolerance;
                } else {
                    r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance;
                }
            } else {
                std::cout.precision(4);
                if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT)) {
                    KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                    KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                    if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        // NOTE: fixed label, this line reports the rotation residual
                        KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << residual_rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << residual_rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl;
                    }
                } else {
                    KRATOS_INFO("DisplacementResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                    KRATOS_INFO("DisplacementResidualContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                    if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        // NOTE: fixed label, this line reports the rotation residual
                        KRATOS_INFO("DisplacementResidualContactCriteria") << "\tROTATION: RATIO = " << residual_rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << residual_rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl;
                    }
                }
            }
        }
        r_process_info[CONVERGENCE_RATIO] = residual_disp_ratio;
        r_process_info[RESIDUAL_NORM] = residual_disp_abs;
        // We check if converged
        const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
        const bool rot_converged = (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (residual_rot_ratio <= mRotRatioTolerance || residual_rot_abs <= mRotAbsTolerance) : true;
        if (disp_converged && rot_converged) {
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT))
                        r_table << BOLDFONT(FGRN("       Achieved"));
                    else
                        r_table << "Achieved";
                } else {
                    if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT))
                        KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                    else
                        KRATOS_INFO("DisplacementResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl;
                }
            }
            return true;
        } else {
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT))
                        r_table << BOLDFONT(FRED("   Not achieved"));
                    else
                        r_table << "Not achieved";
                } else {
                    if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT))
                        KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                    else
                        KRATOS_INFO("DisplacementResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl;
                }
            }
            return false;
        }
    } else // In this case all the displacements are imposed!
        return true;
}
/**
 * @brief This function initializes the convergence criteria
 * @details Detects whether the model part carries rotation DoFs and, if a table
 * utility is available, prepares the output table header exactly once.
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 */
void Initialize( ModelPart& rModelPart) override
{
    // Mark the criterion as initialized
    BaseType::mConvergenceCriteriaIsInitialized = true;
    // Detect whether rotation DoFs must be considered
    mOptions.Set(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart));
    // Prepare the table header (only once, and only when a table utility exists)
    ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    const bool prepare_table = r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementResidualContactCriteria::TABLE_IS_INITIALIZED);
    if (prepare_table) {
        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
        auto& r_table = p_table->GetTable();
        // Displacement columns
        r_table.AddColumn("DP RATIO", 10);
        r_table.AddColumn("EXP. RAT", 10);
        r_table.AddColumn("ABS", 10);
        r_table.AddColumn("EXP. ABS", 10);
        // Rotation columns, only when rotation DoFs are present
        const bool rotation_considered = mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED);
        if (rotation_considered) {
            r_table.AddColumn("RT RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
        }
        r_table.AddColumn("CONVERGENCE", 15);
        mOptions.Set(DisplacementResidualContactCriteria::TABLE_IS_INITIALIZED, true);
    }
}
/**
 * @brief This function initializes the solution step
 * @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) (unused)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables) (unused)
 * @param rb RHS vector (residual) (unused)
 */
void InitializeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Reset the flag so the next PostCriteria call recomputes the reference residual norms
mOptions.Set(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
}
/**
 * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
 * @return The default parameters, merged with the base-class defaults
 */
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
"name" : "displacement_residual_contact_criteria",
"ensure_contact" : false,
"print_convergence_criterion" : false,
"residual_relative_tolerance" : 1.0e-4,
"residual_absolute_tolerance" : 1.0e-9,
"rotation_residual_relative_tolerance" : 1.0e-4,
"rotation_residual_absolute_tolerance" : 1.0e-9
})");
// Getting base class default parameters and merging in any missing entries
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/**
 * @brief Returns the name of the class as used in the settings (snake_case format)
 * @return The name of the class
 */
static std::string Name()
{
return "displacement_residual_contact_criteria";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "DisplacementResidualContactCriteria";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief This method assigns settings to member variables
 * @param ThisParameters Parameters that are assigned to the member variables
 */
void AssignSettings(const Parameters ThisParameters) override
{
BaseType::AssignSettings(ThisParameters);
// The displacement residual
mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();
// The rotation residual
mRotRatioTolerance = ThisParameters["rotation_residual_relative_tolerance"].GetDouble();
mRotAbsTolerance = ThisParameters["rotation_residual_absolute_tolerance"].GetDouble();
// Set local flags (ROTATION_DOF_IS_CONSIDERED is later recomputed in Initialize)
mOptions.Set(DisplacementResidualContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
mOptions.Set(DisplacementResidualContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
mOptions.Set(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags
double mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual
double mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual
double mDispInitialResidualNorm; /// The reference norm of the displacement residual
double mDispCurrentResidualNorm; /// The current norm of the displacement residual
double mRotRatioTolerance; /// The ratio threshold for the norm of the rotation residual
double mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation residual
double mRotInitialResidualNorm; /// The reference norm of the rotation residual
double mRotCurrentResidualNorm; /// The current norm of the rotation residual
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementResidualContactCriteria
///@name Local flags creation
///@{
/// Local Flags
// Out-of-class definitions of the criterion's static flags; each flag is
// assigned a distinct bit position via Kratos::Flags::Create(1..4)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementResidualContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4));
}
#endif /* KRATOS_DISPLACEMENT_RESIDUAL_CONTACT_CRITERIA_H */
|
GB_binop__isge_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__isge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__isge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint8)
// A*D function (colscale): GB (_AxD__isge_uint8)
// D*A function (rowscale): GB (_DxB__isge_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint8)
// C=scalar+B GB (_bind1st__isge_uint8)
// C=scalar+B' GB (_bind1st_tran__isge_uint8)
// C=A+scalar GB (_bind2nd__isge_uint8)
// C=A'+scalar GB (_bind2nd_tran__isge_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT8 || GxB_NO_ISGE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Thin wrapper: all work is done by the included template, specialized by the
// isge_uint8 operator macros defined at the top of this file.
GrB_Info GB (_Cdense_ewise3_noaccum__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B is sliced into B_ntasks tasks (B_ek_slicing) for B_nthreads threads.
GrB_Info GB (_Cdense_accumB__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isge_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable -- the block above always returns; harmless artifact of
// the code generator (this file is auto-generated, do not edit)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// typed view of C's numerical values; the template fills it in
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// typed view of C's numerical values; the template fills it in
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isge_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// General-purpose eWiseMult; the included meta file selects the method.
GrB_Info GB (_AemultB_01__isge_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for this operator (see its #define above), so only the
// #else branch below is compiled in
#if GB_BINOP_FLIP
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// M is sliced into M_ntasks tasks (M_ek_slicing) for M_nthreads threads.
GrB_Info GB (_AemultB_03__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isge_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = (x >= Bx [p]) for every entry present per the bitmap Bb.
GrB_Info GB (_bind1st__isge_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present according to the bitmap Bb
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = (Ax [p] >= y) for every entry present per the bitmap Ab.
GrB_Info GB (_bind2nd__isge_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present according to the bitmap Ab
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__isge_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__isge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
graph.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <algorithm>
#include <cinttypes>
#include <cstddef>
#include <iostream>
#include <type_traits>
#include "pvector.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
// Used to hold node & weight, with another node it makes a weighted edge.
// Const-correctness fix: the conversion operator and PrintEdgeWeight are now
// const-qualified so they can be used through const references.
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
  NodeID_ v;
  WeightT_ w;
  // NOTE: default constructor intentionally leaves members uninitialized
  NodeWeight() {}
  NodeWeight(NodeID_ v) : v(v), w(1) {}
  NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}
  // orders by node id, breaking ties by weight
  bool operator< (const NodeWeight& rhs) const {
    return v == rhs.v ? w < rhs.w : v < rhs.v;
  }
  // doesn't check WeightT_s, needed to remove duplicate edges
  bool operator== (const NodeWeight& rhs) const {
    return v == rhs.v;
  }
  // doesn't check WeightT_s, needed to remove self edges
  bool operator== (const NodeID_& rhs) const {
    return v == rhs;
  }
  // implicit conversion drops the weight; const so it works on const objects
  operator NodeID_() const {
    return v;
  }
  void PrintEdgeWeight() const {
    std::cout << " -- v: " << v << "(" << w << ")\n";
  }
};
// Writes a weighted node as "v w"
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
const NodeWeight<NodeID_, WeightT_>& nw) {
os << nw.v << " " << nw.w;
return os;
}
// Reads a weighted node in the same "v w" format
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
is >> nw.v >> nw.w;
return is;
}
// Syntactic sugar for an edge (source u, destination v)
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
SrcT u;
DstT v;
// NOTE: default constructor leaves u and v uninitialized
EdgePair() {}
EdgePair(SrcT u, DstT v) : u(u), v(v) {}
};
// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;
typedef EdgePair<SGID> SGEdge;
typedef int64_t SGOffset;
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
// Used for *non-negative* offsets within a neighborhood
typedef std::make_unsigned<std::ptrdiff_t>::type OffsetT;
// Used to access neighbors of vertex, basically sugar for iterators
class Neighborhood {
NodeID_ n_;
DestID_** g_index_;
OffsetT start_offset_;
public:
// start_offset is clamped to the neighborhood size, so begin() can never
// pass end() even if the caller requests an offset beyond the degree
Neighborhood(NodeID_ n, DestID_** g_index, OffsetT start_offset) :
n_(n), g_index_(g_index), start_offset_(0) {
OffsetT max_offset = end() - begin();
start_offset_ = std::min(start_offset, max_offset);
}
typedef DestID_* iterator;
iterator begin() { return g_index_[n_] + start_offset_; }
iterator end() { return g_index_[n_+1]; }
};
// Frees the adjacency buffers. delete[] on nullptr is a no-op, so the former
// null checks were redundant and have been removed.
void ReleaseResources() {
  delete[] out_index_;
  delete[] out_neighbors_;
  // For undirected graphs the in_* pointers alias the out_* ones (see the
  // undirected constructor), so they are only freed when the graph is directed
  // to avoid a double delete
  if (directed_) {
    delete[] in_index_;
    delete[] in_neighbors_;
  }
}
public:
// Empty graph: sizes are -1 and all pointers null (safe to destroy)
CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
out_index_(nullptr), out_neighbors_(nullptr),
in_index_(nullptr), in_neighbors_(nullptr) {}
// Undirected graph: in_* alias out_* (ReleaseResources relies on directed_
// to avoid a double delete); each edge is stored twice, hence the /2
CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
directed_(false), num_nodes_(num_nodes),
out_index_(index), out_neighbors_(neighs),
in_index_(index), in_neighbors_(neighs) {
num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
}
// Directed graph: separate incoming and outgoing adjacency arrays
CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index), out_neighbors_(out_neighs),
in_index_(in_index), in_neighbors_(in_neighs) {
num_edges_ = out_index_[num_nodes_] - out_index_[0];
}
// Move constructor: steals the other graph's buffers and leaves it empty
CSRGraph(CSRGraph&& other) : directed_(other.directed_),
num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) {
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
~CSRGraph() {
ReleaseResources();
}
// Move assignment: frees the current buffers, steals other's, leaves other empty
CSRGraph& operator=(CSRGraph&& other) {
if (this != &other) {
ReleaseResources();
directed_ = other.directed_;
num_edges_ = other.num_edges_;
num_nodes_ = other.num_nodes_;
out_index_ = other.out_index_;
out_neighbors_ = other.out_neighbors_;
in_index_ = other.in_index_;
in_neighbors_ = other.in_neighbors_;
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
return *this;
}
  bool directed() const {
    return directed_;
  }

  int64_t num_nodes() const {
    return num_nodes_;
  }

  // Undirected edges are counted once here (see undirected constructor).
  int64_t num_edges() const {
    return num_edges_;
  }

  // Number of directed arcs: each undirected edge contributes two.
  int64_t num_edges_directed() const {
    return directed_ ? num_edges_ : 2*num_edges_;
  }

  // Out-degree of v from adjacent CSR index pointers.
  int64_t out_degree(NodeID_ v) const {
    return out_index_[v+1] - out_index_[v];
  }

  // In-degree; compile-time error if the inverse graph was not built.
  int64_t in_degree(NodeID_ v) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return in_index_[v+1] - in_index_[v];
  }

  // Iterable view over n's outgoing neighbors, optionally skipping the
  // first start_offset entries.
  Neighborhood out_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    return Neighborhood(n, out_index_, start_offset);
  }

  Neighborhood in_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return Neighborhood(n, in_index_, start_offset);
  }
  // Print node/edge counts and average degree (integer division, truncated).
  void PrintStats() const {
    std::cout << "Graph has " << num_nodes_ << " nodes and "
      << num_edges_ << " ";
    if (!directed_)
      std::cout << "un";
    std::cout << "directed edges for degree: ";
    std::cout << num_edges_/num_nodes_ << std::endl;
  }

  // Dump the full adjacency list, one node per line.
  void PrintTopology() const {
    for (NodeID_ i=0; i < num_nodes_; i++) {
      std::cout << i << ": ";
      for (DestID_ j : out_neigh(i)) {
        std::cout << j << " ";
      }
      std::cout << std::endl;
    }
  }
  // Build a CSR index (array of pointers into neighs) from numeric offsets.
  // Caller takes ownership of the returned array (freed via delete[]).
  static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
    NodeID_ length = offsets.size();
    DestID_** index = new DestID_*[length];
    #pragma omp parallel for
    for (NodeID_ n=0; n < length; n++)
      index[n] = neighs + offsets[n];
    return index;
  }

  // Inverse of GenIndex: recover numeric offsets (relative to the first
  // pointer) from the in- or out- pointer index.
  pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
    pvector<SGOffset> offsets(num_nodes_+1);
    for (NodeID_ n=0; n < num_nodes_+1; n++)
      if (in_graph)
        offsets[n] = in_index_[n] - in_index_[0];
      else
        offsets[n] = out_index_[n] - out_index_[0];
    return offsets;
  }

  // Iterable range over all node IDs [0, num_nodes).
  Range<NodeID_> vertices() const {
    return Range<NodeID_>(num_nodes());
  }
  /*
    Helper: print the outgoing neighbors of a node with their weights.
    Assumes DestID_ is a weighted-node type exposing .v and .w -- only
    valid for weighted graph instantiations.
  */
  void PrintNeighbors(NodeID_ node_id) const {
    std::cout << "Printing neighbors for " << node_id << std::endl;
    for(auto v : out_neigh(node_id))
      std::cout << " -- v: " << v.v << " (" << v.w << ")" << std::endl;
  }

  /*
    Difference between the max and min edge weight (timestamp) over all
    outgoing edges of node_id. Returns 0 for a node with no outgoing edges
    (min_bound and max_bound both stay 0).
    TODO: integrate this while building a graph so
    we don't have to recompute this every time?
  */
  float TimeBoundsDelta(NodeID_ node_id) const {
    // PrintNeighbors(node_id);
    float min_bound = 0, max_bound = 0;
    int cnt = 0;
    for(auto v : out_neigh(node_id)) {
      // First edge seeds both bounds so 0 is never a spurious minimum.
      if(cnt == 0)
        min_bound = max_bound = v.w;
      if(v.w < min_bound)
        min_bound = v.w;
      if(v.w > max_bound)
        max_bound = v.w;
      cnt++;
    }
    return (max_bound - min_bound);
  }

  // Linear scan of src's outgoing neighbors; O(out_degree(src_node)).
  bool EdgeExists(NodeID_ src_node, NodeID_ dst_node) const {
    for(auto v : out_neigh(src_node))
    {
      if(v.v == dst_node)
        return true;
    }
    return false;
  }
private:
  bool directed_;            // true: separate in/out CSR arrays are owned
  int64_t num_nodes_;
  int64_t num_edges_;        // undirected edges counted once
  DestID_** out_index_;      // out_index_[v]..out_index_[v+1): neighbors of v
  DestID_* out_neighbors_;
  DestID_** in_index_;       // aliases out_index_ when graph is undirected
  DestID_* in_neighbors_;
};
#endif // GRAPH_H_
|
sylcount.c | #include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <stdbool.h>
#include <stdint.h>
#include <math.h>
#include "include/reactor.h"
#include "include/RNACI.h"
#include "include/sylcount.h"
#define BUFLEN 64
#define ITER_PER_CHECK 256
#define CHARPT(x,i) ((char*)CHAR(STRING_ELT(x,i)))
// True when c is a sentence-terminating character.
static inline bool is_sentend(const char c)
{
  switch (c)
  {
    case '.':
    case ';':
    case '!':
    case '?':
      return true;
    default:
      return false;
  }
}
// True when c terminates a word: whitespace, punctuation, or the NUL.
// Fix: ctype functions have undefined behavior for negative arguments other
// than EOF (CERT STR37-C); with a signed char, bytes >= 0x80 are negative.
// Cast to unsigned char before calling isspace/ispunct.
static inline bool is_wordend(const char c)
{
  const unsigned char uc = (unsigned char) c;
  return (isspace(uc) || ispunct(uc) || c == '\0');
}
// -------------------------------------------------------
// Various "readability" score-ers
// -------------------------------------------------------
// Flesch reading ease: higher means easier to read.
static inline double re_score(const uint32_t tot_words, const uint32_t tot_sents, const uint32_t tot_sylls)
{
  const double words_per_sent = (double) tot_words/tot_sents;
  const double sylls_per_word = (double) tot_sylls/tot_words;
  return 206.835 - 1.015*words_per_sent - 84.6*sylls_per_word;
}
// Flesch-Kincaid grade level: approximate U.S. school grade of the text.
static inline double gl_score(const uint32_t tot_words, const uint32_t tot_sents, const uint32_t tot_sylls)
{
  const double words_per_sent = (double) tot_words/tot_sents;
  const double sylls_per_word = (double) tot_sylls/tot_words;
  return 0.39 * words_per_sent + 11.8 * sylls_per_word - 15.59;
}
// Automated Readability Index
static inline int ari_score(const uint32_t tot_chars, const uint32_t tot_words, const uint32_t tot_sents)
{
return (int) ceil(4.71 * ((double) tot_chars/tot_words) + 0.5 * ((double) tot_words/tot_sents) - 21.43);
}
// Simple Measure of Gobbledygook
static inline double smog_score(const uint32_t tot_polys, const uint32_t tot_sents)
{
return 1.043 * sqrt(30.0 * ((double) tot_polys/tot_sents)) + 3.1291;
}
// Coleman-Liau index: uses characters (not syllables) per 100 words.
static inline double cl_score(const uint32_t tot_chars, const uint32_t tot_words, const uint32_t tot_sents)
{
  const double chars_per_100w = (double) 100.0 * tot_chars/tot_words;
  const double sents_per_100w = (double) 100.0 * tot_sents/tot_words;
  return 0.0588 * chars_per_100w - 0.296 * sents_per_100w - 15.8;
}
// Zero every per-document count column at row i; used when the input
// string is empty or contains only spaces.
static inline void counts_set_degenerate(SEXP chars, SEXP wordchars, SEXP words, SEXP nw,
  SEXP sents, SEXP sylls, SEXP polys, const int i)
{
  INT(chars, i) = 0;
  INT(wordchars, i) = 0;
  INT(words, i) = 0;
  INT(nw, i) = 0;
  INT(sents, i) = 0;
  INT(sylls, i) = 0;
  INT(polys, i) = 0;
}
// Mark every readability score at row i as missing (NaN for doubles,
// NA for the integer ARI column); companion to counts_set_degenerate.
static inline void scores_set_degenerate(SEXP re, SEXP gl, SEXP ari, SEXP smog,
  SEXP cl, const int i)
{
  DBL(re, i) = R_NaN;
  DBL(gl, i) = R_NaN;
  INT(ari, i) = NA_INTEGER;
  DBL(smog, i) = R_NaN;
  DBL(cl, i) = R_NaN;
}
// Compute per-document counts and readability scores for a character
// vector s_, in parallel over documents. Returns a 12-column data.frame.
//
// Fix: the long-token guard was `end-start > BUFLEN`, which admits a token
// of exactly BUFLEN bytes; `buf[end-start] = '\0'` then writes buf[BUFLEN],
// one past the end of `char buf[BUFLEN]` (stack buffer overflow). Changed
// to `end-start+1 > BUFLEN`, matching the correct check in R_corpus_summary.
SEXP R_readability(SEXP s_, SEXP nthreads_)
{
  SEXP ret, ret_names;
  SEXP chars, wordchars, words, nw, sents, sylls, polys;
  SEXP ari, re, gl, smog, cl;

  CHECK_IS_STRINGS(s_);
  CHECK_IS_POSINT(nthreads_, "nthreads");

  const int len = LENGTH(s_);
  int nthreads = asInteger(nthreads_);

  newRvec(chars, len, "int");
  newRvec(wordchars, len, "int");
  newRvec(words, len, "int");
  newRvec(nw, len, "int");
  newRvec(sents, len, "int");
  newRvec(sylls, len, "int");
  newRvec(polys, len, "int");
  newRvec(re, len, "dbl");
  newRvec(gl, len, "dbl");
  newRvec(ari, len, "int");
  newRvec(smog, len, "dbl");
  newRvec(cl, len, "dbl");

  #ifdef _OPENMP
  #pragma omp parallel num_threads(nthreads)
  #endif
  {
    // Per-thread scratch buffer for the current word.
    char buf[BUFLEN];
    #ifdef _OPENMP
    #pragma omp for
    #endif
    for (int i=0; i<len; i++)
    {
      const char *const s = CHARPT(s_, i);
      const int slen = strlen(s);

      // Skip leading spaces; empty / all-space documents get NA scores.
      int j = 0;
      while (j < slen && s[j] == ' ')
        j++;
      if (slen == 0 || j == slen)
      {
        counts_set_degenerate(chars, wordchars, words, nw, sents, sylls, polys, i);
        scores_set_degenerate(re, gl, ari, smog, cl, i);
        continue;
      }

      uint32_t tot_wordchars = 0;
      uint32_t tot_words = 0;
      uint32_t tot_nonwords = 0;
      uint32_t tot_sents = 0;
      uint32_t tot_sylls = 0;
      uint32_t tot_polys = 0;

      int start = 0;
      int end;
      for (; j<=slen && slen>0; j++)
      {
        if (isalnum(s[j]))
          tot_wordchars++;
        else if (is_sentend(s[j]))
          tot_sents++;

        if (is_wordend(s[j]))
        {
          // try to account for acronyms
          while (ispunct(s[j]) && !isspace(s[j+1]))
            j++;

          end = j;
          // FIX: was `end-start > BUFLEN`; must reserve one byte for '\0'.
          if (end-start+1 > BUFLEN)
          {
            tot_nonwords++;
            continue;
          }
          else
            tot_words++;

          memcpy(buf, s+start, end-start);
          buf[end-start] = '\0';

          uint32_t word_sylls = count_syllables(buf, end-start);
          tot_sylls += word_sylls;
          if (word_sylls > 2)
            tot_polys++;

          if (is_sentend(s[j]))
            tot_sents++;

          // Advance past the word separator run to the next word start.
          while (ispunct(s[j]) || isspace(s[j]))
            j++;
          start = j;

          if (isalnum(s[j]))
            tot_wordchars++;
        }
      }

      INT(chars, i) = slen;
      INT(wordchars, i) = tot_wordchars;
      INT(words, i) = tot_words;
      INT(nw, i) = tot_nonwords;
      INT(sents, i) = tot_sents;
      INT(sylls, i) = tot_sylls;
      INT(polys, i) = tot_polys;

      DBL(re, i) = re_score(tot_words, tot_sents, tot_sylls);
      DBL(gl, i) = gl_score(tot_words, tot_sents, tot_sylls);
      INT(ari, i) = ari_score(tot_wordchars, tot_words, tot_sents);
      DBL(smog, i) = smog_score(tot_polys, tot_sents);
      DBL(cl, i) = cl_score(tot_wordchars, tot_words, tot_sents);
    }
  }

  make_list_names(ret_names, 12, "chars", "wordchars", "words", "nonwords", "sents", "sylls", "polys", "re", "gl", "ari", "smog", "cl");
  make_dataframe(ret, RNULL, ret_names, 12, chars, wordchars, words, nw, sents, sylls, polys, re, gl, ari, smog, cl);
  R_END;
  return ret;
}
// -------------------------------------------------------
// Syllable counter
// can not be put into separate file because gperf data isn't guarded correctly
// -------------------------------------------------------
// Count the words in the NUL-terminated buffer buf (length len, terminator
// included by the <= bound). A word ends at whitespace, punctuation, or NUL.
// Fix: isspace/ispunct were called on plain char; for signed char, bytes
// >= 0x80 are negative and that is undefined behavior (CERT STR37-C).
// Cast to unsigned char before the ctype calls.
static inline int count_words(const int len, const char*const restrict buf)
{
  int nw = 0;
  for (int i=0; i<=len; i++)
  {
    if (is_wordend(buf[i]))
    {
      nw++;
      // Collapse a run of separators so punctuation clusters count once.
      while (ispunct((unsigned char)buf[i]) || isspace((unsigned char)buf[i]))
        i++;
    }
  }
  return nw;
}
// NOTE: not thread safe because of the R object memory allocations
// Return a list of data.frames, one per input string, with each word and
// its syllable count. Words longer than BUFLEN get NA syllables.
static SEXP R_sylcount_countsAndWords(SEXP s_)
{
  SEXP ret;
  const int len = LENGTH(s_);
  newRlist(ret, len);

  for (int i=0; i<len; i++)
  {
    SEXP localdf, localdf_names;
    SEXP word, sylls;
    const char *const s = CHARPT(s_, i);
    const int slen = strlen(s);

    // Pre-size the output vectors with a first pass over the string.
    int nwords = count_words(slen, s);
    newRvec(word, nwords, "str");
    newRvec(sylls, nwords, "int");
    make_list_names(localdf_names, 2, "word", "syllables");
    // The data.frame is built (and anchored in ret) before its columns are
    // filled; the columns are filled in place below.
    make_dataframe(localdf, RNULL, localdf_names, 2, word, sylls);
    SET_VECTOR_ELT(ret, i, localdf);

    int start = 0;
    int end;
    int words_found = 0;
    for (int j=0; j<=slen; j++)
    {
      if (is_wordend(s[j]))
      {
        end = j;
        const int wordlen = end-start;
        SET_STRING_ELT(word, words_found, mkCharLen(s+start, wordlen));
        // NOTE(review): the bound here is `> BUFLEN` while the buffered
        // variants elsewhere require wordlen <= BUFLEN-1; confirm
        // count_syllables tolerates a length of exactly BUFLEN.
        if (wordlen > BUFLEN)
          INT(sylls, words_found) = NA_INTEGER;
        else
          INT(sylls, words_found) = count_syllables(CHARPT(word, words_found), wordlen);

        while (ispunct(s[j]) || isspace(s[j]))
          j++;
        start = j;
        words_found++;
      }
    }
    // Presumably balances the PROTECTs from newRvec x2, make_list_names,
    // and make_dataframe -- verify against the RNACI macro definitions.
    UNPROTECT(4);
  }

  // R_END;
  UNPROTECT(1);
  return ret;
}
// NOTE: not thread safe because of the R object memory allocations
// Return a list of integer vectors: syllable counts per word for each input
// string; empty strings yield a scalar NA.
//
// Fix: the long-word guard was `wordlen > BUFLEN`, which admits a word of
// exactly BUFLEN bytes; `buf[wordlen] = '\0'` then writes buf[BUFLEN], one
// past the end of `char buf[BUFLEN]` (stack buffer overflow). Changed to
// `wordlen+1 > BUFLEN` to reserve room for the terminator.
static SEXP R_sylcount_countsOnly(SEXP s_)
{
  SEXP ret;
  char buf[BUFLEN];
  const int len = LENGTH(s_);
  newRlist(ret, len);

  for (int i=0; i<len; i++)
  {
    SEXP sylls;
    const char*const s = CHARPT(s_, i);
    const int slen = strlen(s);

    if (slen == 0)
    {
      SET_VECTOR_ELT(ret, i, ScalarInteger(NA_INTEGER));
      continue;
    }

    // Pre-size with a first pass, then fill in place.
    int nwords = count_words(slen, s);
    newRvec(sylls, nwords, "int");
    SET_VECTOR_ELT(ret, i, sylls);

    int start = 0;
    int end;
    int words_found = 0;
    for (int j=0; j<=slen; j++)
    {
      if (is_wordend(s[j]))
      {
        end = j;
        const int wordlen = end - start;
        // FIX: was `wordlen > BUFLEN`; must reserve one byte for '\0'.
        if (wordlen+1 > BUFLEN)
          INT(sylls, words_found) = NA_INTEGER;
        else
        {
          memcpy(buf, s+start, wordlen);
          buf[wordlen] = '\0';
          INT(sylls, words_found) = count_syllables(buf, wordlen);
        }

        while (ispunct(s[j]) || isspace(s[j]))
          j++;
        start = j;
        words_found++;
      }
    }
    UNPROTECT(1);
  }

  // R_END;
  UNPROTECT(1);
  return ret;
}
// Entry point dispatched from R: validate inputs, then choose between the
// counts-only and counts+words implementations based on the flag.
SEXP R_sylcount(SEXP s, SEXP counts_only)
{
  CHECK_IS_STRINGS(s);
  CHECK_IS_FLAG(counts_only, "counts.only");

  if (INT(counts_only))
    return R_sylcount_countsOnly(s);
  else
    return R_sylcount_countsAndWords(s);
}
// -------------------------------------------------------
// Basic text document count summaries
// -------------------------------------------------------

// Like R_readability but returns only the 7 count columns (no scores);
// documents are processed in parallel with a per-thread word buffer.
SEXP R_corpus_summary(SEXP s_, SEXP nthreads_)
{
  SEXP ret, ret_names;
  SEXP chars, wordchars, words, nw, sents, sylls, polys;

  CHECK_IS_STRINGS(s_);
  CHECK_IS_POSINT(nthreads_, "nthreads");

  const int len = LENGTH(s_);
  int nthreads = asInteger(nthreads_);

  newRvec(chars, len, "int");
  newRvec(wordchars, len, "int");
  newRvec(words, len, "int");
  newRvec(nw, len, "int");
  newRvec(sents, len, "int");
  newRvec(sylls, len, "int");
  newRvec(polys, len, "int");

  #ifdef _OPENMP
  #pragma omp parallel num_threads(nthreads)
  #endif
  {
    // Per-thread scratch buffer for the current word.
    char buf[BUFLEN];
    #ifdef _OPENMP
    #pragma omp for
    #endif
    for (int i=0; i<len; i++)
    {
      const char *const s = CHARPT(s_, i);
      const int slen = strlen(s);

      // Skip leading spaces; empty / all-space documents get zero counts.
      int j = 0;
      while (j < slen && s[j] == ' ')
        j++;
      if (slen == 0 || j == slen)
      {
        counts_set_degenerate(chars, wordchars, words, nw, sents, sylls, polys, i);
        continue;
      }

      uint32_t tot_wordchars = 0;
      uint32_t tot_words = 0;
      uint32_t tot_nonwords = 0;
      uint32_t tot_sents = 0;
      uint32_t tot_sylls = 0;
      uint32_t tot_polys = 0;

      int start = 0;
      int end;
      for (; j<=slen && slen>0; j++)
      {
        if (isalnum(s[j]))
          tot_wordchars++;
        else if (is_sentend(s[j]))
          tot_sents++;

        if (is_wordend(s[j]))
        {
          // try to account for acronyms
          while (ispunct(s[j]) && !isspace(s[j+1]))
            j++;

          end = j;
          // +1 reserves room for the NUL written at buf[end-start] below.
          if (end-start+1 > BUFLEN)
          {
            tot_nonwords++;
            continue;
          }
          else
            tot_words++;

          memcpy(buf, s+start, end-start);
          buf[end-start] = '\0';

          uint32_t word_sylls = count_syllables(buf, end-start);
          tot_sylls += word_sylls;
          if (word_sylls > 2)
            tot_polys++;

          if (is_sentend(s[j]))
            tot_sents++;

          // Advance past the separator run to the next word start.
          while (ispunct(s[j]) || isspace(s[j]))
            j++;
          start = j;

          if (isalnum(s[j]))
            tot_wordchars++;
        }
      }

      INT(chars, i) = slen;
      INT(wordchars, i) = tot_wordchars;
      INT(words, i) = tot_words;
      INT(nw, i) = tot_nonwords;
      INT(sents, i) = tot_sents;
      INT(sylls, i) = tot_sylls;
      INT(polys, i) = tot_polys;
    }
  }

  make_list_names(ret_names, 7, "chars", "wordchars", "words", "nonwords", "sents", "sylls", "polys");
  make_dataframe(ret, RNULL, ret_names, 7, chars, wordchars, words, nw, sents, sylls, polys);
  R_END;
  return ret;
}
|
conservar.c | /*
A simple 2D hydro code
(C) Romain Teyssier : CEA/IRFU -- original F90 code
(C) Pierre-Francois Lavallee : IDRIS -- original F90 code
(C) Guillaume Colin de Verdiere : CEA/DAM -- for the C version
*/
/*
This software is governed by the CeCILL license under French law and
abiding by the rules of distribution of free software. You can use,
modify and/ or redistribute the software under the terms of the CeCILL
license as circulated by CEA, CNRS and INRIA at the following URL
"http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's author, the holder of the
economic rights, and the successive licensors have only limited
liability.
In this respect, the user's attention is drawn to the risks associated
with loading, using, modifying and/or developing or reproducing the
software by the user in light of its specific status of free software,
that may mean that it is complicated to manipulate, and that also
therefore means that it is reserved for developers and experienced
professionals having in-depth computer knowledge. Users are therefore
encouraged to load and test the software's suitability as regards their
requirements in conditions enabling the security of their systems and/or
data to be ensured and, more generally, to use and operate it in the
same conditions as regards security.
The fact that you are presently reading this means that you have had
knowledge of the CeCILL license and that you accept its terms.
*/
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <stdio.h>
#ifndef HMPP
#include "parametres.h"
#include "utils.h"
#include "conservar.h"
#include "perfcnt.h"
#define BLOCKING 0
#define SSST 32
#define JJST 32
// Gather a slab of conservative variables (density ID, momenta IU/IV,
// energy IP, plus any passive scalars) from the global array uold into the
// per-slice working array u, for either an x-sweep (idim==1) or a y-sweep.
void
gatherConservativeVars(const int idim,
  const int rowcol,
  const int Himin,
  const int Himax,
  const int Hjmin,
  const int Hjmax,
  const int Hnvar,
  const int Hnxt,
  const int Hnyt,
  const int Hnxyt,
  const int slices, const int Hstep,
  real_t uold[Hnvar * Hnxt * Hnyt], real_t u[Hnvar][Hstep][Hnxyt]
) {
  int i, j, ivar, s;
  // Flat index into uold: column-major in (i, j, variable).
  #define IHU(i, j, v) ((i) + Hnxt * ((j) + Hnyt * (v)))
  // NOTE(review): IHST references `j` (not its parameter `s`) and appears
  // unused in this file -- looks like a dead/buggy leftover; confirm before
  // removing.
  #define IHST(v,s,i) ((i) + Hstep * ((j) + Hnvar * (v)))
  WHERE("gatherConservativeVars");
  if (idim == 1) {
    // Gather conservative variables
    #pragma omp parallel for private(i, s), shared(u) COLLAPSE
    for (s = 0; s < slices; s++) {
      for (i = Himin; i < Himax; i++) {
        int idxuoID = IHU(i, rowcol + s, ID);
        u[ID][s][i] = uold[idxuoID];

        int idxuoIU = IHU(i, rowcol + s, IU);
        u[IU][s][i] = uold[idxuoIU];

        int idxuoIV = IHU(i, rowcol + s, IV);
        u[IV][s][i] = uold[idxuoIV];

        int idxuoIP = IHU(i, rowcol + s, IP);
        u[IP][s][i] = uold[idxuoIP];
      }
    }
    // Passive scalars beyond IP, if any (serial: usually few variables).
    if (Hnvar > IP) {
      for (ivar = IP + 1; ivar < Hnvar; ivar++) {
        for (s = 0; s < slices; s++) {
          for (i = Himin; i < Himax; i++) {
            u[ivar][s][i] = uold[IHU(i, rowcol + s, ivar)];
          }
        }
      }
    }
    //
  } else {
    // Gather conservative variables
    // NOTE(review): IU and IV are swapped on purpose for the y-sweep,
    // presumably so downstream sweep routines always see the sweep-normal
    // velocity in slot IU -- updateConservativeVars mirrors the swap when
    // writing back. Confirm against the original F90 before "fixing".
    #pragma omp parallel for private(j, s), shared(u)
    for (s = 0; s < slices; s++) {
      for (j = Hjmin; j < Hjmax; j++) {
        u[ID][s][j] = uold[IHU(rowcol + s, j, ID)];
        u[IU][s][j] = uold[IHU(rowcol + s, j, IV)];
        u[IV][s][j] = uold[IHU(rowcol + s, j, IU)];
        u[IP][s][j] = uold[IHU(rowcol + s, j, IP)];
      }
    }
    if (Hnvar > IP) {
      for (ivar = IP + 1; ivar < Hnvar; ivar++) {
        for (s = 0; s < slices; s++) {
          for (j = Hjmin; j < Hjmax; j++) {
            u[ivar][s][j] = uold[IHU(rowcol + s, j, ivar)];
          }
        }
      }
    }
  }
}
#undef IHU
// Inverse of gatherConservativeVars: write the updated slab back into uold,
// adding the flux differences scaled by dt/dx (Godunov-type update). The
// ExtraLayer ghost cells at each end are not updated.
void
updateConservativeVars(const int idim,
  const int rowcol,
  const real_t dtdx,
  const int Himin,
  const int Himax,
  const int Hjmin,
  const int Hjmax,
  const int Hnvar,
  const int Hnxt,
  const int Hnyt,
  const int Hnxyt,
  const int slices, const int Hstep,
  real_t uold[Hnvar * Hnxt * Hnyt], real_t u[Hnvar][Hstep][Hnxyt], real_t flux[Hnvar][Hstep][Hnxyt]
) {
  int i, j, ivar, s;
  WHERE("updateConservativeVars");
  #define IHU(i, j, v) ((i) + Hnxt * ((j) + Hnyt * (v)))

  if (idim == 1) {
    // Update conservative variables
    #pragma omp parallel for private(ivar, s,i), shared(uold) COLLAPSE
    for (s = 0; s < slices; s++) {
      for (ivar = 0; ivar <= IP; ivar++) {
        for (i = Himin + ExtraLayer; i < Himax - ExtraLayer; i++) {
          uold[IHU(i, rowcol + s, ivar)] = u[ivar][s][i] + (flux[ivar][s][i - 2] - flux[ivar][s][i - 1]) * dtdx;
        }
      }
    }
    {
      // Performance counter bookkeeping: 3 flops per updated cell/variable.
      int nops = (IP+1) * slices * ((Himax - ExtraLayer) - (Himin + ExtraLayer));
      FLOPS(3 * nops, 0 * nops, 0 * nops, 0 * nops);
    }
    if (Hnvar > IP) {
      for (ivar = IP + 1; ivar < Hnvar; ivar++) {
        for (s = 0; s < slices; s++) {
          for (i = Himin + ExtraLayer; i < Himax - ExtraLayer; i++) {
            uold[IHU(i, rowcol + s, ivar)] = u[ivar][s][i] + (flux[ivar][s][i - 2] - flux[ivar][s][i - 1]) * dtdx;
          }
        }
      }
    }
  } else {
    // Update conservative variables
    // NOTE(review): IU/IV are swapped back here, mirroring the swap done in
    // gatherConservativeVars for the y-sweep -- keep the two in sync.
    #pragma omp parallel for private(j, s), shared(uold)
    for (s = 0; s < slices; s++) {
      for (j = (Hjmin + ExtraLayer); j < (Hjmax - ExtraLayer); j++) {
        uold[IHU(rowcol + s, j, ID)] = u[ID][s][j] + (flux[ID][s][j - 2] - flux[ID][s][j - 1]) * dtdx;
        uold[IHU(rowcol + s, j, IV)] = u[IU][s][j] + (flux[IU][s][j - 2] - flux[IU][s][j - 1]) * dtdx;
        uold[IHU(rowcol + s, j, IU)] = u[IV][s][j] + (flux[IV][s][j - 2] - flux[IV][s][j - 1]) * dtdx;
        uold[IHU(rowcol + s, j, IP)] = u[IP][s][j] + (flux[IP][s][j - 2] - flux[IP][s][j - 1]) * dtdx;
      }
    }
    {
      int nops = slices * ((Hjmax - ExtraLayer) - (Hjmin + ExtraLayer));
      FLOPS(12 * nops, 0 * nops, 0 * nops, 0 * nops);
    }
    if (Hnvar > IP) {
      for (ivar = IP + 1; ivar < Hnvar; ivar++) {
        for (s = 0; s < slices; s++) {
          for (j = Hjmin + ExtraLayer; j < Hjmax - ExtraLayer; j++) {
            uold[IHU(rowcol + s, j, ivar)] = u[ivar][s][j] + (flux[ivar][s][j - 2] - flux[ivar][s][j - 1]) * dtdx;
          }
        }
      }
    }
  }
}
#undef IHU
#endif
//EOF
|
ADR_SUPGassembler_C_omp.c | /* This file is part of redbKIT.
* Copyright (c) 2016, Ecole Polytechnique Federale de Lausanne (EPFL)
* Author: Federico Negri <federico.negri@epfl.ch>
*/
#include "mex.h"
#include <stdio.h>
#include <math.h>
#include "blas.h"
#include <string.h>
#include "../../Core/Tools.h"
#define INVJAC(i,j,k) invjac[i+(j+k*dim)*noe]
#define GRADREFPHI(i,j,k) gradrefphi[i+(j+k*NumQuadPoints)*nln]
#ifdef _OPENMP
#include <omp.h>
#else
#warning "OpenMP not enabled. Compile with mex ADR_SUPGassembler_C_omp.c CFLAGS="\$CFLAGS -fopenmp" LDFLAGS="\$LDFLAGS -fopenmp""
#endif
// MEX entry point: assemble SUPG stabilization contributions for an
// advection-diffusion-reaction FE problem. Per-element loop is OpenMP
// parallel; outputs are COO triplets for the stabilization matrix (A),
// stabilized mass (M), and the RHS vector (R).
void mexFunction(int nlhs,mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
  /* Check for proper number of arguments. */
  if(nrhs!=14) {
    mexErrMsgTxt("14 inputs are required.");
  } else if(nlhs>6) {
    mexErrMsgTxt("Too many output arguments.");
  }

  double* dim_ptr = mxGetPr(prhs[0]);
  int dim = (int)(dim_ptr[0]);
  int noe = mxGetN(prhs[3]);          // number of elements
  double* nln_ptr = mxGetPr(prhs[4]);
  int nln = (int)(nln_ptr[0]);        // local nodes per element
  int numRowsElements = mxGetM(prhs[3]);
  int nln2 = nln*nln;

  double* tmp_ptr1 = mxGetPr(prhs[2]);
  double dt = tmp_ptr1[0];
  /**/
  plhs[0] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
  plhs[1] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
  plhs[2] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
  plhs[3] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
  plhs[4] = mxCreateDoubleMatrix(nln*noe,1, mxREAL);
  plhs[5] = mxCreateDoubleMatrix(nln*noe,1, mxREAL);

  double* myArows = mxGetPr(plhs[0]);
  double* myAcols = mxGetPr(plhs[1]);
  double* myAcoef = mxGetPr(plhs[2]);
  double* myMcoef = mxGetPr(plhs[3]);
  double* myRrows = mxGetPr(plhs[4]);
  double* myRcoef = mxGetPr(plhs[5]);

  /* copy the string data from prhs[1] into a C string. */
  char *StabType = mxArrayToString(prhs[1]);
  // flag_t: "SUPGt" adds the 4/dt^2 transient term to the tau formula.
  bool flag_t = false;
  if (strcmp(StabType, "SUPGt")==0)
  {
    flag_t = true;
  }
  mxFree(StabType);

  /* Local mass matrix (computed only once) with quadrature nodes */
  // NOTE(review): LocalMass is declared but never used below -- candidate
  // for removal; confirm it is not referenced by generated variants.
  double LocalMass[nln][nln];
  int q;
  int NumQuadPoints = mxGetN(prhs[9]);

  double* mu = mxGetPr(prhs[5]);          // diffusion coefficient per (el, qp)
  double* conv_field = mxGetPr(prhs[6]);  // convective field b
  double* si = mxGetPr(prhs[7]);          // reaction coefficient
  double* f = mxGetPr(prhs[8]);           // source term
  double* w = mxGetPr(prhs[9]);           // quadrature weights
  double* invjac = mxGetPr(prhs[10]);     // inverse element Jacobians
  double* detjac = mxGetPr(prhs[11]);     // Jacobian determinants
  double* phi = mxGetPr(prhs[12]);        // basis functions at qps
  double* gradrefphi = mxGetPr(prhs[13]); // reference-element gradients

  int l,k;
  double* elements = mxGetPr(prhs[3]);

  /* Assembly: loop over the elements */
  int ie;
  #pragma omp parallel for shared(invjac,mu,conv_field,si,f,detjac,elements, myRrows, myRcoef,myAcols, myArows, myAcoef, myMcoef) private(ie,k,l,q) firstprivate(phi,gradrefphi, w, numRowsElements, nln2, nln)
  for (ie = 0; ie < noe; ie = ie + 1 )
  {
    // Physical gradients: chain rule through the inverse Jacobian.
    double gradphi[dim][nln][NumQuadPoints];

    int d1, d2;
    for (k = 0; k < nln; k = k + 1 )
    {
      for (q = 0; q < NumQuadPoints; q = q + 1 )
      {
        for (d1 = 0; d1 < dim; d1 = d1 + 1 )
        {
          gradphi[d1][k][q] = 0;
          for (d2 = 0; d2 < dim; d2 = d2 + 1 )
          {
            gradphi[d1][k][q] = gradphi[d1][k][q] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
          }
        }
      }
    }

    /* compute metric tensors G and g */
    double G[dim][dim];
    double g[dim];
    for (d1 = 0; d1 < dim; d1 = d1 + 1 )
    {
      g[d1] = 0;
      for (d2 = 0; d2 < dim; d2 = d2 + 1 )
      {
        G[d1][d2] = 0.0;
        int d3;
        for (d3 = 0; d3 < dim; d3 = d3 + 1 )
        {
          G[d1][d2] += INVJAC(ie,d1,d3) * INVJAC(ie,d2,d3);
        }
        g[d1] = g[d1] + INVJAC(ie,d1,d2);
      }
    }
    double traceGtG = Mdot(dim, G, G);

    // Stabilization parameter tau at each quadrature point; the transient
    // 4/dt^2 term is enabled only when flag_t is true.
    double tauK[NumQuadPoints];
    for (q = 0; q < NumQuadPoints; q = q + 1 )
    {
      double b_hq[dim];
      for (d1 = 0; d1 < dim; d1 = d1 + 1 )
      {
        b_hq[d1] = conv_field[ie+(q+d1*NumQuadPoints)*noe];
      }
      double G_U_hq[dim];
      MatrixVector(dim, dim, G, b_hq, G_U_hq);
      tauK[q] = pow( flag_t * 4/(dt*dt) + ScalarProduct(dim, b_hq, G_U_hq) + 9*mu[ie+q*noe]*mu[ie+q*noe]*traceGtG, -0.5);
    }

    int iii = 0;
    int ii = 0;
    int a, b;

    // Precompute b . grad(phi_k) at every quadrature point.
    double bh_gradPHI[nln][NumQuadPoints];
    for (k = 0; k < nln; k = k + 1 )
    {
      for (q = 0; q < NumQuadPoints; q = q + 1 )
      {
        bh_gradPHI[k][q] = 0;
        for (d1 = 0; d1 < dim; d1 = d1 + 1 )
        {
          bh_gradPHI[k][q] += conv_field[ie+(q+d1*NumQuadPoints)*noe] * gradphi[d1][k][q];
        }
      }
    }

    /* a test, b trial */
    for (a = 0; a < nln; a = a + 1 )
    {
      for (b = 0; b < nln; b = b + 1 )
      {
        double aloc = 0;
        double mloc = 0;
        for (q = 0; q < NumQuadPoints; q = q + 1 )
        {
          aloc += (bh_gradPHI[b][q] + si[ie+q*noe] * phi[b+q*nln]) * bh_gradPHI[a][q] * tauK[q] * w[q];
          mloc += phi[b+q*nln] * bh_gradPHI[a][q] * tauK[q] * w[q];
        }
        myArows[ie*nln2+iii] = elements[a+ie*numRowsElements];
        myAcols[ie*nln2+iii] = elements[b+ie*numRowsElements];
        myAcoef[ie*nln2+iii] = aloc*detjac[ie];
        myMcoef[ie*nln2+iii] = mloc*detjac[ie];

        iii = iii + 1;
      }

      double floc = 0;
      for (q = 0; q < NumQuadPoints; q = q + 1 )
      {
        floc += ( bh_gradPHI[a][q] * f[ie+q*noe] * tauK[q] ) * w[q];
      }
      myRrows[ie*nln+ii] = elements[a+ie*numRowsElements];
      myRcoef[ie*nln+ii] = floc*detjac[ie];

      ii = ii + 1;
    }
  }
}
|
element.h | /* All or part of this file was contributed by Intel under license:
* Copyright (C) 2017-2018 Intel Corporation
* SPDX-License-Identifier: MIT
*/
#pragma once
#include "tensors/tensor.h"
namespace marian {
namespace cpu {
// Elementwise kernel over K tensors; tensors[0] is the output. When
// `broadcast` is true (compile-time), each input index is remapped via
// bindex() so inputs with smaller shapes are broadcast against the output.
template <size_t K, bool broadcast, class Functor>
void gElement(Functor functor,
              functional::Array<functional::Tensor<float>, K> tensors) {
  int length = tensors[0].shape().elements();
  functional::Array<int, functional::Shape::size()> dims;
  functional::Array<int, K> indices;

  #pragma omp parallel for simd
  for(int index = 0; index < length; ++index) {
    // Default: same flat index in every tensor (no broadcasting).
    indices.fill(index);

    if(broadcast) {
      // Decompose the output index into per-axis coordinates, then map
      // those coordinates into each (possibly smaller-shaped) input.
      tensors[0].shape().dims(index, dims);
      for(int i = 1; i < K; ++i)
        indices[i] = tensors[i].shape().bindex(dims);
    }

    tensors[0][index] = functional::apply(functor, tensors, indices);
  }
}
template <class Functor, class ...Tensors>
void Element(Functor functor, marian::Tensor out, Tensors ...tensors) {
constexpr size_t K = sizeof...(tensors) + 1;
functional::Array<functional::Tensor<float>, K> gTensors = {out, tensors...};
int length = gTensors[0].shape().elements();
bool broadcast = false;
for(int i = 1; i < K; ++i)
broadcast = broadcast || gTensors[0].shape() != gTensors[i].shape();
if(broadcast)
cpu::gElement<K, true>(functor, gTensors);
else
cpu::gElement<K, false>(functor, gTensors);
}
}
}
|
DRB064-outeronly2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outmost loop can be parallelized.
The inner loop has loop carried true data dependence.
However, the loop is not parallelized so no race condition.
*/
double b[100][100];
#define N 100
// Initialize b[i][j] = i*j over the full N x N array.
// NOTE(review): the inner "#pragma omp parallel for" opens a NESTED
// parallel region (nesting is disabled by default, so the inner pragma is
// typically inert). This file is a DataRaceBench benchmark, so the
// construct may be intentional -- do not simplify without checking.
int init()
{
  int i,j,k;  // k is unused
  #pragma omp parallel for private(i, j)
  for (i = 0; i < N; i++) {
    #pragma omp parallel for private(j)
    for (j = 0; j < N; j++) {
      b[i][j] = i * j;
    }
  }
  return 0;
}
// Only the outer loop is parallelized: the inner j-loop carries a true
// dependence (b[i][j] reads b[i][j-1]), so it must stay serial. Rows are
// independent, hence no race across threads. Part of the DataRaceBench
// "no race" suite -- the loop structure is the point of the benchmark.
void foo(int n, int m)
{
  int i,j;
  #pragma omp parallel for private(i, j)
  for (i=0;i<n;i++)
    for (j=1;j<m;j++) // Be careful about bounds of j
      b[i][j]=b[i][j-1];
}
// Print every element of b, one per line.
// NOTE(review): printf is used but <stdio.h> is not included anywhere in
// this file; this relies on an implicit declaration (removed in C99,
// an error in C23). Add #include <stdio.h> at the top of the file.
int print()
{
  int i,j,k;  // k is unused
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      printf("%lf\n", b[i][j]);
    }
  }
  return 0;
}
// Benchmark driver: initialize, run the dependence-carrying kernel over
// the full array, then print the result.
int main()
{
  init();
  foo(100, 100);
  print();
  return 0;
}
|
sparse_block_matrix_diagonal.h | // g2o - General Graph Optimization
// Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef G2O_SPARSE_BLOCK_MATRIX_DIAGONAL_H
#define G2O_SPARSE_BLOCK_MATRIX_DIAGONAL_H
#include <vector>
#include <Eigen/Core>
#include <Eigen/StdVector>
#include "g2o/config.h"
#include "matrix_operations.h"
namespace g2o {
/**
* \brief Sparse matrix which uses blocks on the diagonal
*
* This class is used as a const view on a SparseBlockMatrix
* which allows a faster iteration over the elements of the
* matrix.
*/
template <class MatrixType> class SparseBlockMatrixDiagonal {
public:
  //! this is the type of the elementary block, it is an Eigen::Matrix.
  typedef MatrixType SparseMatrixBlock;

  //! columns of the matrix (last cumulative block index, 0 if empty)
  int cols() const { return _blockIndices.size() ? _blockIndices.back() : 0; }
  //! rows of the matrix (square: same as cols)
  int rows() const { return _blockIndices.size() ? _blockIndices.back() : 0; }

  typedef std::vector<MatrixType, Eigen::aligned_allocator<MatrixType>>
    DiagonalVector;

  // Keeps only a REFERENCE to blockIndices: the caller's vector must
  // outlive this object.
  SparseBlockMatrixDiagonal(const std::vector<int> &blockIndices)
    : _blockIndices(blockIndices) {}

  //! how many rows/cols does the block at block-row / block-column r has?
  inline int dimOfBlock(int r) const {
    return r ? _blockIndices[r] - _blockIndices[r - 1] : _blockIndices[0];
  }

  //! where does the row /col at block-row / block-column r starts?
  inline int baseOfBlock(int r) const { return r ? _blockIndices[r - 1] : 0; }

  //! the block matrices per block-column
  const DiagonalVector &diagonal() const { return _diagonal; }
  DiagonalVector &diagonal() { return _diagonal; }

  //! indices of the row blocks
  const std::vector<int> &blockIndices() const { return _blockIndices; }

  // dest = M * src, block by block. If dest is null it is allocated here
  // with new[] and zeroed -- the CALLER owns it and must delete[] it.
  // Blocks are independent, so the loop is safe to parallelize.
  void multiply(double *&dest, const double *src) const {
    int destSize = cols();
    if (!dest) {
      dest = new double[destSize];
      memset(dest, 0, destSize * sizeof(double));
    }

    // map the memory by Eigen
    Eigen::Map<Eigen::VectorXd> destVec(dest, destSize);
    Eigen::Map<const Eigen::VectorXd> srcVec(src, rows());

    #ifdef G2O_OPENMP
    #pragma omp parallel for default(shared) schedule(dynamic, 10)
    #endif
    for (int i = 0; i < static_cast<int>(_diagonal.size()); ++i) {
      int destOffset = baseOfBlock(i);
      int srcOffset = destOffset;  // diagonal block: same row/col base
      const SparseMatrixBlock &A = _diagonal[i];
      // destVec += *A.transpose() * srcVec (according to the sub-vector
      // parts)
      internal::axpy(A, srcVec, srcOffset, destVec, destOffset);
    }
  }

protected:
  const std::vector<int> &_blockIndices; ///< vector of the indices of the
                                         ///< blocks along the diagonal
  DiagonalVector _diagonal;
};
} // namespace g2o
#endif
|
GB_resize.c | //------------------------------------------------------------------------------
// GB_resize: change the size of a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB_select.h"
#define GB_FREE_ALL \
{ \
GB_FREE (&Ax_new, Ax_new_size) ; \
GB_FREE (&Ab_new, Ab_new_size) ; \
GB_phbix_free (A) ; \
}
//------------------------------------------------------------------------------
// GB_resize: resize a GrB_Matrix
//------------------------------------------------------------------------------
GrB_Info GB_resize              // change the size of a matrix
(
    GrB_Matrix A,               // matrix to modify
    const GrB_Index nrows_new,  // new number of rows in matrix
    const GrB_Index ncols_new,  // new number of columns in matrix
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    GB_void *restrict Ax_new = NULL ; size_t Ax_new_size = 0 ;
    int8_t *restrict Ab_new = NULL ; size_t Ab_new_size = 0 ;
    ASSERT_MATRIX_OK (A, "A to resize", GB0) ;

    //--------------------------------------------------------------------------
    // handle the CSR/CSC format
    //--------------------------------------------------------------------------

    // vlen is the length of each "vector" (a column if CSC, a row if CSR) and
    // vdim is the number of vectors, so the user-visible (nrows, ncols) maps
    // differently depending on the stored format.
    int64_t vdim_old = A->vdim ;
    int64_t vlen_old = A->vlen ;
    int64_t vlen_new, vdim_new ;
    if (A->is_csc)
    {
        vlen_new = nrows_new ;
        vdim_new = ncols_new ;
    }
    else
    {
        vlen_new = ncols_new ;
        vdim_new = nrows_new ;
    }

    if (vdim_new == vdim_old && vlen_new == vlen_old)
    {
        // nothing to do
        return (GrB_SUCCESS) ;
    }

    //--------------------------------------------------------------------------
    // delete any lingering zombies and assemble any pending tuples
    //--------------------------------------------------------------------------

    // only do so if either dimension is shrinking, or if pending tuples exist
    // and vdim_old <= 1 and vdim_new > 1, since in that case, Pending->j has
    // not been allocated yet, but would be required in the resized matrix.
    // If A is jumbled, it must be sorted.

    if (vdim_new < vdim_old || vlen_new < vlen_old || A->jumbled ||
        (GB_PENDING (A) && vdim_old <= 1 && vdim_new > 1))
    {
        GB_MATRIX_WAIT (A) ;
        ASSERT_MATRIX_OK (A, "A to resize, wait", GB0) ;
    }

    ASSERT (!GB_JUMBLED (A)) ;

    //--------------------------------------------------------------------------
    // resize the matrix
    --------------------------------------------------------------------------*/

    const bool A_is_bitmap = GB_IS_BITMAP (A) ;
    const bool A_is_full = GB_IS_FULL (A) ;
    const bool A_is_shrinking = (vdim_new <= vdim_old && vlen_new <= vlen_old) ;

    if ((A_is_full || A_is_bitmap) && A_is_shrinking)
    {

        //----------------------------------------------------------------------
        // A is full or bitmap, and both dimensions shrink (or stay the same)
        //----------------------------------------------------------------------

        // get the old and new dimensions
        int64_t anz_new = 1 ;
        bool ok = GB_Index_multiply ((GrB_Index *) &anz_new,
            vlen_new, vdim_new) ;
        // overflow cannot occur here since the matrix is shrinking and the
        // old dimensions were valid; keep anz_new sane regardless
        if (!ok) anz_new = 1 ;
        size_t nzmax_new = GB_IMAX (anz_new, 1) ;
        // a full matrix can shrink in-place when each vector keeps its length
        // (only trailing vectors are dropped) or when at most one vector
        // remains; otherwise entries must be moved to their new positions
        bool in_place = A_is_full && (vlen_new == vlen_old || vdim_new <= 1) ;
        size_t asize = A->type->size ;
        const bool A_iso = A->iso ;

        //----------------------------------------------------------------------
        // allocate or reallocate A->x, unless A is iso
        //----------------------------------------------------------------------

        ok = true ;
        if (!A_iso)
        {
            if (in_place)
            {
                // reallocate A->x in-place; no data movement needed
                GB_REALLOC (A->x, nzmax_new*asize, GB_void, &(A->x_size), &ok,
                    Context) ;
            }
            else
            {
                // allocate new space for A->x
                Ax_new = GB_MALLOC (nzmax_new*asize, GB_void, &Ax_new_size) ;
                ok = (Ax_new != NULL) ;
            }
        }

        //----------------------------------------------------------------------
        // allocate or reallocate A->b
        //----------------------------------------------------------------------

        if (!in_place && A_is_bitmap)
        {
            // allocate new space for A->b: the bitmap holds one int8_t per
            // entry, so its size is nzmax_new, independent of the type size
            // (the previous code allocated nzmax_new*asize entries, which
            // over-allocated the bitmap by a factor of asize)
            Ab_new = GB_MALLOC (nzmax_new, int8_t, &Ab_new_size) ;
            ok = ok && (Ab_new != NULL) ;
        }

        if (!ok)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }

        //----------------------------------------------------------------------
        // move data if not in-place
        //----------------------------------------------------------------------

        if (!in_place)
        {

            //------------------------------------------------------------------
            // determine number of threads to use
            //------------------------------------------------------------------

            GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
            int nthreads = GB_nthreads (anz_new, chunk, nthreads_max) ;

            //------------------------------------------------------------------
            // resize Ax, unless A is iso
            //------------------------------------------------------------------

            // copy the leading vlen_new values of each of the leading
            // vdim_new vectors into the new, smaller array
            if (!A_iso)
            {
                GB_void *restrict Ax_old = (GB_void *) A->x ;
                int64_t j ;
                if (vdim_new <= 4*nthreads)
                {
                    // use all threads for each vector
                    for (j = 0 ; j < vdim_new ; j++)
                    {
                        GB_void *pdest = Ax_new + j * vlen_new * asize ;
                        GB_void *psrc = Ax_old + j * vlen_old * asize ;
                        GB_memcpy (pdest, psrc, vlen_new * asize, nthreads) ;
                    }
                }
                else
                {
                    // use a single thread for each vector
                    #pragma omp parallel for num_threads(nthreads) \
                        schedule(static)
                    for (j = 0 ; j < vdim_new ; j++)
                    {
                        GB_void *pdest = Ax_new + j * vlen_new * asize ;
                        GB_void *psrc = Ax_old + j * vlen_old * asize ;
                        memcpy (pdest, psrc, vlen_new * asize) ;
                    }
                }
                GB_FREE (&Ax_old, A->x_size) ;
                A->x = Ax_new ; A->x_size = Ax_new_size ;
            }

            //------------------------------------------------------------------
            // resize Ab if A is bitmap, and count the # of entries
            //------------------------------------------------------------------

            if (A_is_bitmap)
            {
                int8_t *restrict Ab_old = A->b ;
                int64_t pnew ;
                int64_t anvals = 0 ;
                #pragma omp parallel for num_threads(nthreads) \
                    schedule(static) reduction(+:anvals)
                for (pnew = 0 ; pnew < anz_new ; pnew++)
                {
                    // position (i,j) in the new bitmap comes from position
                    // i + j*vlen_old in the old bitmap
                    int64_t i = pnew % vlen_new ;
                    int64_t j = pnew / vlen_new ;
                    int64_t pold = i + j * vlen_old ;
                    int8_t ab = Ab_old [pold] ;
                    Ab_new [pnew] = ab ;
                    anvals += ab ;
                }
                A->nvals = anvals ;
                GB_FREE (&Ab_old, A->b_size) ;
                A->b = Ab_new ; A->b_size = Ab_new_size ;
            }
        }

        //----------------------------------------------------------------------
        // adjust dimensions and return result
        //----------------------------------------------------------------------

        A->vdim = vdim_new ;
        A->vlen = vlen_new ;
        A->nvec = vdim_new ;
        A->nvec_nonempty = (vlen_new == 0) ? 0 : vdim_new ;
        ASSERT_MATRIX_OK (A, "A bitmap/full shrunk", GB0) ;
        return (GrB_SUCCESS) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // convert A to hypersparse and resize it
        //----------------------------------------------------------------------

        // convert to hypersparse
        GB_OK (GB_convert_any_to_hyper (A, Context)) ;
        ASSERT (GB_IS_HYPERSPARSE (A)) ;

        // resize the number of sparse vectors
        int64_t *restrict Ah = A->h ;
        int64_t *restrict Ap = A->p ;
        A->vdim = vdim_new ;

        if (vdim_new < A->plen)
        {
            // reduce the size of A->p and A->h; this cannot fail
            info = GB_hyper_realloc (A, vdim_new, Context) ;
            ASSERT (info == GrB_SUCCESS) ;
            Ap = A->p ;
            Ah = A->h ;
        }

        if (vdim_new < vdim_old)
        {
            // decrease A->nvec to delete the vectors outside the range
            // 0...vdim_new-1.
            int64_t pleft = 0 ;
            int64_t pright = GB_IMIN (A->nvec, vdim_new) - 1 ;
            bool found ;
            GB_SPLIT_BINARY_SEARCH (vdim_new, Ah, pleft, pright, found) ;
            A->nvec = pleft ;
        }

        if (vdim_new < vdim_old)
        {
            // number of vectors is decreasing, need to count the new number of
            // non-empty vectors: done during pruning or by selector, below.
            A->nvec_nonempty = -1 ;     // recomputed just below
        }

        //----------------------------------------------------------------------
        // resize the length of each vector
        //----------------------------------------------------------------------

        // if vlen is shrinking, delete entries outside the new matrix
        if (vlen_new < vlen_old)
        {
            GB_OK (GB_selector (NULL /* A in-place */, GB_RESIZE_opcode, NULL,
                false, A, vlen_new-1, NULL, Context)) ;
        }

        //----------------------------------------------------------------------
        // vlen has been resized
        //----------------------------------------------------------------------

        A->vlen = vlen_new ;
        ASSERT_MATRIX_OK (A, "A vlen resized", GB0) ;

        //----------------------------------------------------------------------
        // conform the matrix to its desired sparsity structure
        //----------------------------------------------------------------------

        return (GB_conform (A, Context)) ;
    }
}
|
par_add_cycle.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* ParAMG cycling routine
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGCycle
*--------------------------------------------------------------------------*/
/* Perform one V-cycle in which the levels addlvl..add_end are treated
   additively while levels outside that range are treated multiplicatively.
   On multiplicative levels the cycle smooths, forms the residual, and
   restricts it; on additive levels the residual is restricted straight
   down unchanged and all additive-level smoothing is applied at once
   (via D_inv in the "simple" variant, or the combined operator Lambda
   otherwise) before the up-sweep.  Returns Solve_err_flag (0 on success). */
HYPRE_Int
hypre_BoomerAMGAdditiveCycle( void *amg_vdata)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
/* Data Structure variables */
hypre_ParCSRMatrix **A_array;
hypre_ParCSRMatrix **P_array;
hypre_ParCSRMatrix **R_array;
hypre_ParCSRMatrix *Lambda;
hypre_ParCSRMatrix *Atilde;
hypre_ParVector **F_array;
hypre_ParVector **U_array;
hypre_ParVector *Vtemp;
hypre_ParVector *Ztemp;
hypre_ParVector *Xtilde, *Rtilde;
HYPRE_Int **CF_marker_array;
HYPRE_Int num_levels;
HYPRE_Int addlvl, add_end;
HYPRE_Int additive;
HYPRE_Int mult_additive;
HYPRE_Int simple;
HYPRE_Int add_last_lvl;
HYPRE_Int i, j, num_rows;
HYPRE_Int n_global;
HYPRE_Int rlx_order;
/* Local variables */
HYPRE_Int Solve_err_flag = 0;
HYPRE_Int level;
HYPRE_Int coarse_grid;
HYPRE_Int fine_grid;
HYPRE_Int rlx_down;
HYPRE_Int rlx_up;
HYPRE_Int rlx_coarse;
HYPRE_Int *grid_relax_type;
HYPRE_Int *num_grid_sweeps;
hypre_Vector **l1_norms;
HYPRE_Real alpha, beta;
HYPRE_Real *u_data;
HYPRE_Real *v_data;
hypre_Vector *l1_norms_lvl;
HYPRE_Real *D_inv;
HYPRE_Real *x_global;
HYPRE_Real *r_global;
HYPRE_Real *relax_weight;
HYPRE_Real *omega;
#if 0
HYPRE_Real *D_mat;
HYPRE_Real *S_vec;
#endif
HYPRE_ANNOTATE_FUNC_BEGIN;
/* Acquire data and allocate storage */
A_array = hypre_ParAMGDataAArray(amg_data);
F_array = hypre_ParAMGDataFArray(amg_data);
U_array = hypre_ParAMGDataUArray(amg_data);
P_array = hypre_ParAMGDataPArray(amg_data);
R_array = hypre_ParAMGDataRArray(amg_data);
CF_marker_array = hypre_ParAMGDataCFMarkerArray(amg_data);
Vtemp = hypre_ParAMGDataVtemp(amg_data);
Ztemp = hypre_ParAMGDataZtemp(amg_data);
num_levels = hypre_ParAMGDataNumLevels(amg_data);
additive = hypre_ParAMGDataAdditive(amg_data);
mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
simple = hypre_ParAMGDataSimple(amg_data);
add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data);
Lambda = hypre_ParAMGDataLambda(amg_data);
Atilde = hypre_ParAMGDataAtilde(amg_data);
Xtilde = hypre_ParAMGDataXtilde(amg_data);
Rtilde = hypre_ParAMGDataRtilde(amg_data);
l1_norms = hypre_ParAMGDataL1Norms(amg_data);
D_inv = hypre_ParAMGDataDinv(amg_data);
relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
omega = hypre_ParAMGDataOmega(amg_data);
rlx_order = hypre_ParAMGDataRelaxOrder(amg_data);
num_grid_sweeps = hypre_ParAMGDataNumGridSweeps(amg_data);
/* Initialize */
/* addlvl is the first level treated additively: only one of
   additive/mult_additive/simple is active (>= 0); the others are -1 */
addlvl = hypre_max(additive, mult_additive);
addlvl = hypre_max(addlvl, simple);
if (add_last_lvl == -1 ) add_end = num_levels-1;
else add_end = add_last_lvl;
Solve_err_flag = 0;
/*---------------------------------------------------------------------
 * Main loop of cycling --- multiplicative version --- V-cycle
 *--------------------------------------------------------------------*/
/* down cycle */
rlx_down = grid_relax_type[1];
rlx_up = grid_relax_type[2];
rlx_coarse = grid_relax_type[3];
for (level = 0; level < num_levels-1; level++)
{
HYPRE_ANNOTATE_MGLEVEL_BEGIN(level);
fine_grid = level;
coarse_grid = level + 1;
u_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[fine_grid]));
v_data = hypre_VectorData(hypre_ParVectorLocalVector(Vtemp));
l1_norms_lvl = l1_norms[level];
hypre_ParVectorSetConstantValues(U_array[coarse_grid], 0.0);
if (level < addlvl || level > add_end) /* multiplicative version */
{
/* smoothing step */
if (rlx_down == 0)
{
/* weighted Jacobi: u_i = w * r_i / a_ii; A_i[i] indexes the diagonal
   entry, which hypre stores first in each row of the diag part */
HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
for (j=0; j < num_grid_sweeps[1]; j++)
{
hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
u_data[i] = relax_weight[level]*v_data[i] / A_data[A_i[i]];
}
}
else if (rlx_down != 18)
{
/*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_down,0,*/
for (j=0; j < num_grid_sweeps[1]; j++)
{
hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
CF_marker_array[fine_grid], rlx_down,rlx_order,1,
relax_weight[fine_grid], omega[fine_grid],
l1_norms[level] ? hypre_VectorData(l1_norms[level]) : NULL,
U_array[fine_grid], Vtemp, Ztemp);
hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
}
}
else
{
/* rlx_down == 18: L1-Jacobi, u_i += r_i / l1_norm_i */
num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
for (j=0; j < num_grid_sweeps[1]; j++)
{
hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
u_data[i] += v_data[i] / hypre_VectorData(l1_norms_lvl)[i];
}
}
}
/* compute residual Vtemp = f - A u, then restrict it:
   f_coarse = R^T * Vtemp */
alpha = -1.0;
beta = 1.0;
hypre_ParCSRMatrixMatvec(alpha, A_array[fine_grid], U_array[fine_grid],
beta, Vtemp);
alpha = 1.0;
beta = 0.0;
hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
beta,F_array[coarse_grid]);
}
else /* additive version */
{
/* no smoothing here: restrict the right-hand side unchanged; the
   combined additive smoothing is applied after the down cycle */
hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
if (level == 0) /* compute residual */
{
hypre_ParVectorCopy(Vtemp, Rtilde);
hypre_ParVectorCopy(U_array[fine_grid],Xtilde);
}
alpha = 1.0;
beta = 0.0;
hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
beta,F_array[coarse_grid]);
}
HYPRE_ANNOTATE_MGLEVEL_END(level);
}
/* additive smoothing and solve coarse grid */
HYPRE_ANNOTATE_MGLEVEL_BEGIN(num_levels - 1);
if (addlvl < num_levels)
{
if (simple > -1)
{
/* "simple" additive variant: one diagonally-scaled correction
   x += D_inv * r across all additive levels at once */
x_global = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
r_global = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Xtilde));
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_global; i++)
x_global[i] += D_inv[i]*r_global[i];
}
else
{
if (num_grid_sweeps[1] > 1)
{
/* two-sweep variant: replace Rtilde by (2I - Atilde*Lambda)*Rtilde
   before applying Lambda (uses a temporary ParVector) */
n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Rtilde));
hypre_ParVector *Tmptilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
hypre_Vector *Tmptilde_local = hypre_SeqVectorCreate(n_global);
hypre_SeqVectorInitialize(Tmptilde_local);
hypre_ParVectorLocalVector(Tmptilde) = Tmptilde_local;
hypre_ParVectorOwnsData(Tmptilde) = 1;
hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 0.0, Tmptilde);
hypre_ParVectorScale(2.0,Rtilde);
hypre_ParCSRMatrixMatvec(-1.0, Atilde, Tmptilde, 1.0, Rtilde);
hypre_ParVectorDestroy(Tmptilde);
}
hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 1.0, Xtilde);
}
if (addlvl == 0) hypre_ParVectorCopy(Xtilde, U_array[0]);
}
if (add_end < num_levels -1)
{
/* the coarsest level is outside the additive range: relax on it
   multiplicatively */
fine_grid = num_levels -1;
for (j=0; j < num_grid_sweeps[3]; j++)
if (rlx_coarse == 18)
hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
1, 1,
l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
1.0, 1.0 ,0,0,0,0,
U_array[fine_grid], Vtemp, Ztemp);
else
hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
NULL, rlx_coarse,0,0,
relax_weight[fine_grid], omega[fine_grid],
l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
U_array[fine_grid], Vtemp, Ztemp);
}
HYPRE_ANNOTATE_MGLEVEL_END(num_levels - 1);
/* up cycle */
for (level = num_levels-1; level > 0; level--)
{
HYPRE_ANNOTATE_MGLEVEL_BEGIN(level);
fine_grid = level - 1;
coarse_grid = level;
if (level <= addlvl || level > add_end+1) /* multiplicative version */
{
/* prolong the coarse correction (u_fine += P * u_coarse), then
   post-smooth on the fine level */
alpha = 1.0;
beta = 1.0;
hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
U_array[coarse_grid],
beta, U_array[fine_grid]);
if (rlx_up != 18)
/*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_up,0,*/
for (j=0; j < num_grid_sweeps[2]; j++)
hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
CF_marker_array[fine_grid],
rlx_up,rlx_order,2,
relax_weight[fine_grid], omega[fine_grid],
l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
U_array[fine_grid], Vtemp, Ztemp);
else if (rlx_order)
{
/* CF-ordered L1-Jacobi: relax C points (-1) then F points (1) */
HYPRE_Int loc_relax_points[2];
loc_relax_points[0] = -1;
loc_relax_points[1] = 1;
for (j=0; j < num_grid_sweeps[2]; j++)
for (i=0; i < 2; i++)
hypre_ParCSRRelax_L1_Jacobi(A_array[fine_grid],F_array[fine_grid],
CF_marker_array[fine_grid],
loc_relax_points[i],
1.0,
l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
U_array[fine_grid], Vtemp);
}
else
for (j=0; j < num_grid_sweeps[2]; j++)
hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
1, 1,
l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
1.0, 1.0 ,0,0,0,0,
U_array[fine_grid], Vtemp, Ztemp);
}
else /* additive version */
{
/* prolong only; additive smoothing was already applied above */
alpha = 1.0;
beta = 1.0;
hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
U_array[coarse_grid],
beta, U_array[fine_grid]);
}
HYPRE_ANNOTATE_MGLEVEL_END(level);
}
HYPRE_ANNOTATE_FUNC_END;
return(Solve_err_flag);
}
HYPRE_Int hypre_CreateLambda(void *amg_vdata)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
/* Data Structure variables */
MPI_Comm comm;
hypre_ParCSRMatrix **A_array;
hypre_ParVector **F_array;
hypre_ParVector **U_array;
hypre_ParCSRMatrix *A_tmp;
hypre_ParCSRMatrix *Lambda;
hypre_CSRMatrix *L_diag;
hypre_CSRMatrix *L_offd;
hypre_ParCSRMatrix *Atilde;
hypre_CSRMatrix *Atilde_diag;
hypre_CSRMatrix *Atilde_offd;
HYPRE_Real *Atilde_diag_data;
HYPRE_Real *Atilde_offd_data;
hypre_CSRMatrix *A_tmp_diag;
hypre_CSRMatrix *A_tmp_offd;
hypre_ParVector *Xtilde;
hypre_ParVector *Rtilde;
hypre_Vector *Xtilde_local;
hypre_Vector *Rtilde_local;
hypre_ParCSRCommPkg *comm_pkg;
hypre_ParCSRCommPkg *L_comm_pkg = NULL;
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Real *L_diag_data;
HYPRE_Real *L_offd_data;
HYPRE_Real *buf_data = NULL;
HYPRE_Real *tmp_data;
HYPRE_Real *x_data;
HYPRE_Real *r_data;
hypre_Vector *l1_norms;
HYPRE_Real *A_tmp_diag_data;
HYPRE_Real *A_tmp_offd_data;
HYPRE_Real *D_data = NULL;
HYPRE_Real *D_data_offd = NULL;
HYPRE_Int *L_diag_i;
HYPRE_Int *L_diag_j;
HYPRE_Int *L_offd_i;
HYPRE_Int *L_offd_j;
HYPRE_Int *Atilde_diag_i;
HYPRE_Int *Atilde_diag_j;
HYPRE_Int *Atilde_offd_i;
HYPRE_Int *Atilde_offd_j;
HYPRE_Int *A_tmp_diag_i;
HYPRE_Int *A_tmp_offd_i;
HYPRE_Int *A_tmp_diag_j;
HYPRE_Int *A_tmp_offd_j;
HYPRE_Int *L_recv_ptr = NULL;
HYPRE_Int *L_send_ptr = NULL;
HYPRE_Int *L_recv_procs = NULL;
HYPRE_Int *L_send_procs = NULL;
HYPRE_Int *L_send_map_elmts = NULL;
HYPRE_Int *recv_procs;
HYPRE_Int *send_procs;
HYPRE_Int *send_map_elmts;
HYPRE_Int *send_map_starts;
HYPRE_Int *recv_vec_starts;
HYPRE_Int *all_send_procs = NULL;
HYPRE_Int *all_recv_procs = NULL;
HYPRE_Int *remap = NULL;
HYPRE_Int *level_start;
HYPRE_Int addlvl;
HYPRE_Int additive;
HYPRE_Int mult_additive;
HYPRE_Int num_levels;
HYPRE_Int num_add_lvls;
HYPRE_Int num_procs;
HYPRE_Int num_sends, num_recvs;
HYPRE_Int num_sends_L = 0;
HYPRE_Int num_recvs_L = 0;
HYPRE_Int send_data_L = 0;
HYPRE_Int num_rows_L = 0;
HYPRE_Int num_rows_tmp = 0;
HYPRE_Int num_cols_offd_L = 0;
HYPRE_Int num_cols_offd = 0;
HYPRE_Int level, i, j, k;
HYPRE_Int this_proc, cnt, cnt_diag, cnt_offd;
HYPRE_Int A_cnt_diag, A_cnt_offd;
HYPRE_Int cnt_recv, cnt_send, cnt_row, row_start;
HYPRE_Int start_diag, start_offd, indx, cnt_map;
HYPRE_Int start, j_indx, index, cnt_level;
HYPRE_Int max_sends, max_recvs;
HYPRE_Int ns;
/* Local variables */
HYPRE_Int Solve_err_flag = 0;
HYPRE_Int num_nonzeros_diag;
HYPRE_Int num_nonzeros_offd;
hypre_Vector **l1_norms_ptr = NULL;
/*HYPRE_Real *relax_weight = NULL;
HYPRE_Int relax_type; */
HYPRE_Int add_rlx;
HYPRE_Int add_last_lvl, add_end;
HYPRE_Real add_rlx_wt;
/* Acquire data and allocate storage */
A_array = hypre_ParAMGDataAArray(amg_data);
F_array = hypre_ParAMGDataFArray(amg_data);
U_array = hypre_ParAMGDataUArray(amg_data);
additive = hypre_ParAMGDataAdditive(amg_data);
mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
num_levels = hypre_ParAMGDataNumLevels(amg_data);
/*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/
comm = hypre_ParCSRMatrixComm(A_array[0]);
add_rlx = hypre_ParAMGDataAddRelaxType(amg_data);
add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data);
ns = hypre_ParAMGDataNumGridSweeps(amg_data)[1];
hypre_MPI_Comm_size(comm,&num_procs);
l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data);
addlvl = hypre_max(additive, mult_additive);
if (add_last_lvl != -1) add_end = add_last_lvl+1;
else add_end = num_levels;
num_add_lvls = add_end+1-addlvl;
level_start = hypre_CTAlloc(HYPRE_Int, num_add_lvls+1, HYPRE_MEMORY_HOST);
send_data_L = 0;
num_rows_L = 0;
num_cols_offd_L = 0;
num_nonzeros_diag = 0;
num_nonzeros_offd = 0;
level_start[0] = 0;
cnt = 1;
max_sends = 0;
max_recvs = 0;
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp);
A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd);
num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
num_cols_offd = hypre_CSRMatrixNumCols(A_tmp_offd);
num_rows_L += num_rows_tmp;
level_start[cnt] = level_start[cnt-1] + num_rows_tmp;
cnt++;
num_cols_offd_L += num_cols_offd;
num_nonzeros_diag += A_tmp_diag_i[num_rows_tmp];
num_nonzeros_offd += A_tmp_offd_i[num_rows_tmp];
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
max_sends += num_sends;
if (num_sends)
send_data_L += hypre_ParCSRCommPkgSendMapStart(comm_pkg,num_sends);
max_recvs += hypre_ParCSRCommPkgNumRecvs(comm_pkg);
}
}
if (max_sends >= num_procs ||max_recvs >= num_procs)
{
max_sends = num_procs;
max_recvs = num_procs;
}
if (max_sends) all_send_procs = hypre_CTAlloc(HYPRE_Int, max_sends, HYPRE_MEMORY_HOST);
if (max_recvs) all_recv_procs = hypre_CTAlloc(HYPRE_Int, max_recvs, HYPRE_MEMORY_HOST);
cnt_send = 0;
cnt_recv = 0;
if (max_sends || max_recvs)
{
if (max_sends < num_procs && max_recvs < num_procs)
{
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
for (j = 0; j < num_sends; j++)
all_send_procs[cnt_send++] = send_procs[j];
for (j = 0; j < num_recvs; j++)
all_recv_procs[cnt_recv++] = recv_procs[j];
}
}
if (max_sends)
{
hypre_qsort0(all_send_procs, 0, max_sends-1);
num_sends_L = 1;
this_proc = all_send_procs[0];
for (i=1; i < max_sends; i++)
{
if (all_send_procs[i] > this_proc)
{
this_proc = all_send_procs[i];
all_send_procs[num_sends_L++] = this_proc;
}
}
L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST);
for (j=0; j < num_sends_L; j++)
L_send_procs[j] = all_send_procs[j];
hypre_TFree(all_send_procs, HYPRE_MEMORY_HOST);
}
if (max_recvs)
{
hypre_qsort0(all_recv_procs, 0, max_recvs-1);
num_recvs_L = 1;
this_proc = all_recv_procs[0];
for (i=1; i < max_recvs; i++)
{
if (all_recv_procs[i] > this_proc)
{
this_proc = all_recv_procs[i];
all_recv_procs[num_recvs_L++] = this_proc;
}
}
L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST);
for (j=0; j < num_recvs_L; j++)
L_recv_procs[j] = all_recv_procs[j];
hypre_TFree(all_recv_procs, HYPRE_MEMORY_HOST);
}
L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1, HYPRE_MEMORY_HOST);
L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1, HYPRE_MEMORY_HOST);
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
}
else
{
num_sends = 0;
num_recvs = 0;
}
for (k = 0; k < num_sends; k++)
{
this_proc = hypre_BinarySearch(L_send_procs,send_procs[k],num_sends_L);
L_send_ptr[this_proc+1] += send_map_starts[k+1]-send_map_starts[k];
}
for (k = 0; k < num_recvs; k++)
{
this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[k],num_recvs_L);
L_recv_ptr[this_proc+1] += recv_vec_starts[k+1]-recv_vec_starts[k];
}
}
L_recv_ptr[0] = 0;
for (i=1; i < num_recvs_L; i++)
L_recv_ptr[i+1] += L_recv_ptr[i];
L_send_ptr[0] = 0;
for (i=1; i < num_sends_L; i++)
L_send_ptr[i+1] += L_send_ptr[i];
}
else
{
num_recvs_L = 0;
num_sends_L = 0;
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
for (j = 0; j < num_sends; j++)
{
this_proc = send_procs[j];
if (all_send_procs[this_proc] == 0)
num_sends_L++;
all_send_procs[this_proc] += send_map_starts[j+1]-send_map_starts[j];
}
for (j = 0; j < num_recvs; j++)
{
this_proc = recv_procs[j];
if (all_recv_procs[this_proc] == 0)
num_recvs_L++;
all_recv_procs[this_proc] += recv_vec_starts[j+1]-recv_vec_starts[j];
}
}
}
if (max_sends)
{
L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST);
L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1, HYPRE_MEMORY_HOST);
num_sends_L = 0;
for (j=0; j < num_procs; j++)
{
this_proc = all_send_procs[j];
if (this_proc)
{
L_send_procs[num_sends_L++] = j;
L_send_ptr[num_sends_L] = this_proc + L_send_ptr[num_sends_L-1];
}
}
}
if (max_recvs)
{
L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST);
L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1, HYPRE_MEMORY_HOST);
num_recvs_L = 0;
for (j=0; j < num_procs; j++)
{
this_proc = all_recv_procs[j];
if (this_proc)
{
L_recv_procs[num_recvs_L++] = j;
L_recv_ptr[num_recvs_L] = this_proc + L_recv_ptr[num_recvs_L-1];
}
}
}
}
}
if (max_sends) hypre_TFree(all_send_procs, HYPRE_MEMORY_HOST);
if (max_recvs) hypre_TFree(all_recv_procs, HYPRE_MEMORY_HOST);
L_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag);
L_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd);
hypre_CSRMatrixInitialize(L_diag);
hypre_CSRMatrixInitialize(L_offd);
if (num_nonzeros_diag)
{
L_diag_data = hypre_CSRMatrixData(L_diag);
L_diag_j = hypre_CSRMatrixJ(L_diag);
}
L_diag_i = hypre_CSRMatrixI(L_diag);
if (num_nonzeros_offd)
{
L_offd_data = hypre_CSRMatrixData(L_offd);
L_offd_j = hypre_CSRMatrixJ(L_offd);
}
L_offd_i = hypre_CSRMatrixI(L_offd);
if (ns > 1)
{
Atilde_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag);
Atilde_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd);
hypre_CSRMatrixInitialize(Atilde_diag);
hypre_CSRMatrixInitialize(Atilde_offd);
if (num_nonzeros_diag)
{
Atilde_diag_data = hypre_CSRMatrixData(Atilde_diag);
Atilde_diag_j = hypre_CSRMatrixJ(Atilde_diag);
}
Atilde_diag_i = hypre_CSRMatrixI(Atilde_diag);
if (num_nonzeros_offd)
{
Atilde_offd_data = hypre_CSRMatrixData(Atilde_offd);
Atilde_offd_j = hypre_CSRMatrixJ(Atilde_offd);
}
Atilde_offd_i = hypre_CSRMatrixI(Atilde_offd);
}
if (num_rows_L) D_data = hypre_CTAlloc(HYPRE_Real, num_rows_L, HYPRE_MEMORY_HOST);
if (send_data_L)
{
L_send_map_elmts = hypre_CTAlloc(HYPRE_Int, send_data_L, HYPRE_MEMORY_HOST);
buf_data = hypre_CTAlloc(HYPRE_Real, send_data_L, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_L)
{
D_data_offd = hypre_CTAlloc(HYPRE_Real, num_cols_offd_L, HYPRE_MEMORY_HOST);
/*L_col_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L);*/
remap = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L, HYPRE_MEMORY_HOST);
}
Rtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
Rtilde_local = hypre_SeqVectorCreate(num_rows_L);
hypre_SeqVectorInitialize(Rtilde_local);
hypre_ParVectorLocalVector(Rtilde) = Rtilde_local;
hypre_ParVectorOwnsData(Rtilde) = 1;
Xtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
Xtilde_local = hypre_SeqVectorCreate(num_rows_L);
hypre_SeqVectorInitialize(Xtilde_local);
hypre_ParVectorLocalVector(Xtilde) = Xtilde_local;
hypre_ParVectorOwnsData(Xtilde) = 1;
x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
cnt = 0;
cnt_level = 0;
cnt_diag = 0;
cnt_offd = 0;
cnt_row = 1;
L_diag_i[0] = 0;
L_offd_i[0] = 0;
if (ns > 1)
{
A_cnt_diag = 0;
A_cnt_offd = 0;
Atilde_diag_i[0] = 0;
Atilde_offd_i[0] = 0;
}
for (level=addlvl; level < add_end; level++)
{
row_start = level_start[cnt_level];
if (level != 0)
{
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level]));
if (tmp_data)
{
hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(F_array[level])));
}
hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[row_start];
hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0;
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level]));
if (tmp_data)
{
hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(U_array[level])));
}
hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[row_start];
hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0;
}
cnt_level++;
start_diag = L_diag_i[cnt_row-1];
start_offd = L_offd_i[cnt_row-1];
A_tmp = A_array[level];
A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp);
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd);
A_tmp_diag_j = hypre_CSRMatrixJ(A_tmp_diag);
A_tmp_offd_j = hypre_CSRMatrixJ(A_tmp_offd);
A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag);
A_tmp_offd_data = hypre_CSRMatrixData(A_tmp_offd);
num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
}
else
{
num_sends = 0;
num_recvs = 0;
}
/* Compute new combined communication package */
for (i=0; i < num_sends; i++)
{
this_proc = hypre_BinarySearch(L_send_procs,send_procs[i],num_sends_L);
indx = L_send_ptr[this_proc];
for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++)
{
L_send_map_elmts[indx++] = row_start + send_map_elmts[j];
}
L_send_ptr[this_proc] = indx;
}
cnt_map = 0;
for (i = 0; i < num_recvs; i++)
{
this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[i],num_recvs_L);
indx = L_recv_ptr[this_proc];
for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
{
remap[cnt_map++] = indx++;
}
L_recv_ptr[this_proc] = indx;
}
/* Compute Lambda */
if (add_rlx == 0)
{
/*HYPRE_Real rlx_wt = relax_weight[level];*/
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_rows_tmp; i++)
{
D_data[i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]];
L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
}
if (ns > 1)
for (i=0; i < num_rows_tmp; i++)
{
Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
}
}
else
{
l1_norms = l1_norms_ptr[level];
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_rows_tmp; i++)
{
D_data[i] = 1.0 / hypre_VectorData(l1_norms)[i];
L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
}
if (ns > 1)
{
for (i=0; i < num_rows_tmp; i++)
{
Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
}
}
}
if (num_procs > 1)
{
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_starts[i];
for (j=start; j < send_map_starts[i+1]; j++)
buf_data[index++] = D_data[send_map_elmts[j]];
}
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg,
buf_data, D_data_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
for (i = 0; i < num_rows_tmp; i++)
{
j_indx = A_tmp_diag_i[i];
if (ns > 1)
{
Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j_indx];
Atilde_diag_j[A_cnt_diag++] = i+row_start;
}
L_diag_data[cnt_diag] = (2.0 - A_tmp_diag_data[j_indx]*D_data[i])*D_data[i];
L_diag_j[cnt_diag++] = i+row_start;
for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++)
{
j_indx = A_tmp_diag_j[j];
L_diag_data[cnt_diag] = (- A_tmp_diag_data[j]*D_data[j_indx])*D_data[i];
L_diag_j[cnt_diag++] = j_indx+row_start;
}
for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++)
{
j_indx = A_tmp_offd_j[j];
L_offd_data[cnt_offd] = (- A_tmp_offd_data[j]*D_data_offd[j_indx])*D_data[i];
L_offd_j[cnt_offd++] = remap[j_indx];
}
if (ns > 1)
{
for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++)
{
j_indx = A_tmp_diag_j[j];
Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j];
Atilde_diag_j[A_cnt_diag++] = j_indx+row_start;
}
for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++)
{
j_indx = A_tmp_offd_j[j];
Atilde_offd_data[A_cnt_offd] = A_tmp_offd_data[j];
Atilde_offd_j[A_cnt_offd++] = remap[j_indx];
}
}
}
cnt_row += num_rows_tmp;
}
if (L_send_ptr)
{
for (i=num_sends_L-1; i > 0; i--)
L_send_ptr[i] = L_send_ptr[i-1];
L_send_ptr[0] = 0;
}
else
L_send_ptr = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
if (L_recv_ptr)
{
for (i=num_recvs_L-1; i > 0; i--)
L_recv_ptr[i] = L_recv_ptr[i-1];
L_recv_ptr[0] = 0;
}
else
L_recv_ptr = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
L_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgNumRecvs(L_comm_pkg) = num_recvs_L;
hypre_ParCSRCommPkgNumSends(L_comm_pkg) = num_sends_L;
hypre_ParCSRCommPkgRecvProcs(L_comm_pkg) = L_recv_procs;
hypre_ParCSRCommPkgSendProcs(L_comm_pkg) = L_send_procs;
hypre_ParCSRCommPkgRecvVecStarts(L_comm_pkg) = L_recv_ptr;
hypre_ParCSRCommPkgSendMapStarts(L_comm_pkg) = L_send_ptr;
hypre_ParCSRCommPkgSendMapElmts(L_comm_pkg) = L_send_map_elmts;
hypre_ParCSRCommPkgComm(L_comm_pkg) = comm;
Lambda = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDiag(Lambda) = L_diag;
hypre_ParCSRMatrixOffd(Lambda) = L_offd;
hypre_ParCSRMatrixCommPkg(Lambda) = L_comm_pkg;
hypre_ParCSRMatrixComm(Lambda) = comm;
hypre_ParCSRMatrixOwnsData(Lambda) = 1;
if (ns > 1)
{
/*hypre_ParCSRCommPkg *A_comm_pkg = NULL;
HYPRE_Int *A_recv_ptr = NULL;
HYPRE_Int *A_send_ptr = NULL;
HYPRE_Int *A_recv_procs = NULL;
HYPRE_Int *A_send_procs = NULL;
HYPRE_Int *A_send_map_elmts = NULL;
A_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
A_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
A_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
A_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST);
A_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST);
A_send_map_elmts = hypre_CTAlloc(HYPRE_Int, L_send_ptr[num_sends_L], HYPRE_MEMORY_HOST);
for (i=0; i<num_recvs_L+1; i++)
A_recv_ptr[i] = L_recv_ptr[i];
for (i=0; i<num_sends_L+1; i++)
A_send_ptr[i] = L_send_ptr[i];
for (i=0; i<num_recvs_L; i++)
A_recv_procs[i] = L_recv_procs[i];
for (i=0; i<num_sends_L; i++)
A_send_procs[i] = L_send_procs[i];
for (i=0; i < L_send_ptr[num_sends_L]; i++)
A_send_map_elmts[i] = L_send_map_elmts[i];
hypre_ParCSRCommPkgNumRecvs(A_comm_pkg) = num_recvs_L;
hypre_ParCSRCommPkgNumSends(A_comm_pkg) = num_sends_L;
hypre_ParCSRCommPkgRecvProcs(A_comm_pkg) = A_recv_procs;
hypre_ParCSRCommPkgSendProcs(A_comm_pkg) = A_send_procs;
hypre_ParCSRCommPkgRecvVecStarts(A_comm_pkg) = A_recv_ptr;
hypre_ParCSRCommPkgSendMapStarts(A_comm_pkg) = A_send_ptr;
hypre_ParCSRCommPkgSendMapElmts(A_comm_pkg) = A_send_map_elmts;
hypre_ParCSRCommPkgComm(A_comm_pkg) = comm; */
Atilde = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDiag(Atilde) = Atilde_diag;
hypre_ParCSRMatrixOffd(Atilde) = Atilde_offd;
hypre_ParCSRMatrixCommPkg(Atilde) = L_comm_pkg;
hypre_ParCSRMatrixComm(Atilde) = comm;
hypre_ParCSRMatrixOwnsData(Atilde) = 1;
hypre_ParAMGDataAtilde(amg_data) = Atilde;
}
hypre_ParAMGDataLambda(amg_data) = Lambda;
hypre_ParAMGDataRtilde(amg_data) = Rtilde;
hypre_ParAMGDataXtilde(amg_data) = Xtilde;
hypre_TFree(D_data_offd, HYPRE_MEMORY_HOST);
hypre_TFree(D_data, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(remap, HYPRE_MEMORY_HOST);
hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(level_start, HYPRE_MEMORY_HOST);
return Solve_err_flag;
}
/*
 * hypre_CreateDinv: for the "simple" additive AMG variants, build
 *  - Rtilde / Xtilde: composite residual and solution vectors whose local
 *    storage spans all levels in [addlvl, add_end); the per-level F_array /
 *    U_array vectors on levels > 0 are re-pointed into slices of that storage
 *    (ownership transferred: their old data is freed and OwnsData cleared),
 *  - D_inv: the per-row inverse "diagonal" used for the additive relaxation,
 *    either add_rlx_wt / diag(A) (add_rlx == 0) or 1 / l1-norm otherwise.
 * Results are stored back into amg_data. Returns Solve_err_flag (always 0 here).
 */
HYPRE_Int hypre_CreateDinv(void *amg_vdata)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
/* Data Structure variables */
hypre_ParCSRMatrix **A_array;
hypre_ParVector **F_array;
hypre_ParVector **U_array;
hypre_ParCSRMatrix *A_tmp;
hypre_CSRMatrix *A_tmp_diag;
hypre_ParVector *Xtilde;
hypre_ParVector *Rtilde;
hypre_Vector *Xtilde_local;
hypre_Vector *Rtilde_local;
HYPRE_Real *x_data;
HYPRE_Real *r_data;
HYPRE_Real *tmp_data;
HYPRE_Real *D_inv = NULL;
/*HYPRE_Real *relax_weight = NULL;
HYPRE_Real relax_type;*/
HYPRE_Int addlvl;
HYPRE_Int num_levels;
HYPRE_Int num_rows_L;
HYPRE_Int num_rows_tmp;
HYPRE_Int level, i;
HYPRE_Int add_rlx;
HYPRE_Real add_rlx_wt;
HYPRE_Int add_last_lvl, add_end;
/* Local variables */
HYPRE_Int Solve_err_flag = 0;
hypre_Vector **l1_norms_ptr = NULL;
hypre_Vector *l1_norms;
HYPRE_Int l1_start;
/* Acquire data and allocate storage */
A_array = hypre_ParAMGDataAArray(amg_data);
F_array = hypre_ParAMGDataFArray(amg_data);
U_array = hypre_ParAMGDataUArray(amg_data);
addlvl = hypre_ParAMGDataSimple(amg_data);
num_levels = hypre_ParAMGDataNumLevels(amg_data);
add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data);
add_rlx = hypre_ParAMGDataAddRelaxType(amg_data);
add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
/*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/
l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data);
/* smooth_option = hypre_ParAMGDataSmoothOption(amg_data); */
/* add_last_lvl == -1 means "apply additive scheme down to the coarsest level" */
if (add_last_lvl == -1 ) add_end = num_levels;
else add_end = add_last_lvl;
/* First pass: total number of local rows over all additive levels,
 * which sizes the composite vectors and D_inv. */
num_rows_L = 0;
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
num_rows_L += num_rows_tmp;
}
/* Composite residual vector (all additive levels stacked) */
Rtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
Rtilde_local = hypre_SeqVectorCreate(num_rows_L);
hypre_SeqVectorInitialize(Rtilde_local);
hypre_ParVectorLocalVector(Rtilde) = Rtilde_local;
hypre_ParVectorOwnsData(Rtilde) = 1;
/* Composite solution vector, same layout as Rtilde */
Xtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
Xtilde_local = hypre_SeqVectorCreate(num_rows_L);
hypre_SeqVectorInitialize(Xtilde_local);
hypre_ParVectorLocalVector(Xtilde) = Xtilde_local;
hypre_ParVectorOwnsData(Xtilde) = 1;
x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
D_inv = hypre_CTAlloc(HYPRE_Real, num_rows_L, HYPRE_MEMORY_HOST);
l1_start = 0;
/* Second pass: alias per-level F/U vectors into the composite storage
 * and fill the corresponding slice of D_inv. */
for (level=addlvl; level < add_end; level++)
{
if (level != 0)
{
/* Free the level's own F data and point it at the Rtilde slice;
 * OwnsData = 0 so the composite vector keeps sole ownership. */
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level]));
if (tmp_data)
{
hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(F_array[level])));
}
hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[l1_start];
hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0;
/* Same re-pointing for the level's U vector into Xtilde */
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level]));
if (tmp_data)
{
hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(U_array[level])));
}
hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[l1_start];
hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0;
}
A_tmp = A_array[level];
A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
if (add_rlx == 0)
{
/* Weighted Jacobi: D_inv = add_rlx_wt / a_ii. A_tmp_diag_i[i] indexes
 * the first entry of row i, which hypre stores as the diagonal. */
/*HYPRE_Real rlx_wt = relax_weight[level];*/
HYPRE_Int *A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
HYPRE_Real *A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag);
#ifdef HYPRE_USING_OPENMP
/* NOTE(review): this is an orphaned `omp for` (no enclosing parallel
 * region in this function); it only parallelizes if the caller is
 * already inside a parallel region — TODO confirm intent. */
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_rows_tmp; i++)
{
D_inv[l1_start+i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]];
}
}
else
{
/* l1-Jacobi: D_inv = 1 / l1-norm of the row */
l1_norms = l1_norms_ptr[level];
#ifdef HYPRE_USING_OPENMP
/* NOTE(review): orphaned `omp for`, see note above */
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_rows_tmp; i++)
{
D_inv[l1_start+i] = 1.0 / hypre_VectorData(l1_norms)[i];
}
}
l1_start += num_rows_tmp;
}
/* Hand the results to the AMG data structure (it takes ownership) */
hypre_ParAMGDataDinv(amg_data) = D_inv;
hypre_ParAMGDataRtilde(amg_data) = Rtilde;
hypre_ParAMGDataXtilde(amg_data) = Xtilde;
return Solve_err_flag;
}
|
IOLayersRules.h | // Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#ifndef INPUTLAYER_H
#define INPUTLAYER_H
// Rulebook Format
// rules[0][0] == mode
// rules[0][1] == maxActive per spatial location (==1 for modes 0,1,2)
// rules[0][2] == nInputRows
// rules[0][3] == nOutputRows
// rules[1] nOutputRows x (1+maxActive)
// mode 0==guaranteed unique 1==overwrite, 2=keep, 3=sum, 4=mean
// Build the input-layer rulebook mapping nInputRows input coordinate rows to
// deduplicated output (active) sites, populating the per-sample sparse grids.
// coords is nInputRows x nInputColumns (either `dimension` columns, or
// `dimension + 1` with a trailing batch index). On return nActive holds the
// number of output rows. Rulebook layout is described in the comment above
// this function. mode: 0==guaranteed unique, 1==overwrite, 2==keep,
// 3==sum, 4==mean.
template <Int dimension>
void inputLayerRules(SparseGrids<dimension> &SGs, RuleBook &rules, long *coords,
Int nInputRows, Int nInputColumns, Int batchSize, Int mode,
Int &nActive) {
assert(nActive == 0);
assert(rules.size() == 0);
assert(SGs.size() == 0);
SGs.resize(batchSize); // Set a minimum batch size if necessary
Point<dimension> p;
if (mode == 0) {
// Mode 0: every input row is its own output row — no deduplication,
// so the rulebook needs only the header (rules[1] is omitted).
nActive = nInputRows;
rules.resize(1);
rules[0].push_back(mode);
rules[0].push_back(1);
rules[0].push_back(nInputRows);
rules[0].push_back(nInputRows);
if (nInputColumns == dimension) {
// No batch column: all points belong to a single sample grid.
SGs.resize(1);
auto &sg = SGs[0];
for (Int i = 0; i < nInputRows; ++i) {
for (Int j = 0; j < dimension; j++)
p[j] = coords[j];
coords += dimension;
sg.mp[p] = i;
}
} else { // nInputColumns == dimension + 1
// Trailing column is the sample index; grow SGs on demand.
Int idx;
for (Int i = 0; i < nInputRows; ++i) {
for (Int j = 0; j < dimension; j++)
p[j] = coords[j];
idx = coords[dimension];
coords += dimension + 1;
if (idx + 1 >= (Int)SGs.size())
SGs.resize(idx + 1);
SGs[idx].mp[p] = i;
}
}
return;
}
// Compile list of how input rows correspond to output rows
// (outputRows[k] = input rows that collapse onto active site k).
std::vector<std::vector<Int>> outputRows;
if (nInputColumns == dimension) {
SGs.resize(1);
auto &sg = SGs[0];
for (Int i = 0; i < nInputRows; ++i) {
for (Int j = 0; j < dimension; j++)
p[j] = coords[j];
coords += dimension;
// insert() succeeds only for the first occurrence of a point;
// that occurrence allocates a new output row.
if (sg.mp.insert(make_pair(p, nActive)).second) {
outputRows.resize(++nActive);
}
outputRows[sg.mp[p]].push_back(i);
}
} else { // nInputColumns == dimension + 1
Int idx;
for (Int i = 0; i < nInputRows; ++i) {
for (Int j = 0; j < dimension; j++)
p[j] = coords[j];
idx = coords[dimension];
coords += dimension + 1;
if (idx + 1 >= (Int)SGs.size())
SGs.resize(idx + 1);
auto &sg = SGs[idx];
if (sg.mp.insert(make_pair(p, nActive)).second) {
outputRows.resize(++nActive);
}
outputRows[sg.mp[p]].push_back(i);
}
}
// Emit the rulebook header and the per-output-row rules.
rules.resize(2);
rules[0].push_back(mode);
rules[0].push_back(1); // replace with maxActive if mode==3 or 4
rules[0].push_back(nInputRows);
rules[0].push_back(outputRows.size());
auto &rule = rules[1];
if (mode == 1) {
// Overwrite: keep the first input row seen for each site.
for (Int i = 0; i < nActive; ++i) {
rule.push_back(1);
rule.push_back(outputRows[i].front());
}
}
if (mode == 2) {
// Keep: keep the last input row seen for each site.
for (Int i = 0; i < nActive; ++i) {
rule.push_back(1);
rule.push_back(outputRows[i].back());
}
}
if (mode == 3 or mode == 4) {
// Sum/mean: each site lists all its input rows, padded so every
// rule occupies exactly (1 + maxActive) entries.
Int maxActive = 0;
for (auto &row : outputRows)
maxActive = std::max(maxActive, (Int)row.size());
rules[0][1] = maxActive;
for (auto &row : outputRows) {
rule.push_back(row.size());
for (auto &r : row)
rule.push_back(r);
// Round the rule length up to the next multiple of (maxActive + 1).
rule.resize((rule.size() + maxActive) / (maxActive + 1) *
(maxActive + 1));
}
}
}
// Rulebook Format
// rules[0][0] == mode
// rules[0][1] == maxActive per spatial location (==1 for modes 0,1,2)
// rules[0][2] == batchSize
// rules[0][3] == length
// rules[0][4] == nOutputRows
// rules[1] nOutputRows x (1+maxActive)
// bl is a batchSize x length x dimension long array of coordinates
// mode 0==guaranteed unique and all present; 1==overwrite, 2=keep, 3=sum,
// 4=mean
// Build a rulebook from batched, fixed-length coordinate data:
// bl is a batchSize x length x dimension long array. Points with a negative
// first coordinate are treated as padding and skipped (modes 1-4). The
// per-sample dedup runs in parallel; sg.ctr records each sample's offset into
// the global active-site numbering. Rulebook layout is in the comment above.
template <Int dimension>
void blRules(SparseGrids<dimension> &SGs, RuleBook &rules, long *coords,
Int batchSize, Int length, Int mode, Int &nActive) {
assert(nActive == 0);
assert(rules.size() == 0);
assert(SGs.size() == 0);
SGs.resize(batchSize);
Int I;
if (mode == 0) {
// Mode 0: all batchSize*length points are unique and present, so the
// mapping is the identity; only the header is emitted.
nActive = batchSize * length;
rules.resize(1);
rules[0].push_back(mode);
rules[0].push_back(1);
rules[0].push_back(batchSize);
rules[0].push_back(length);
rules[0].push_back(nActive);
#pragma omp parallel for private(I)
for (I = 0; I < batchSize; I++) {
auto &sg = SGs[I];
sg.ctr = I * length; // global offset of sample I's sites
auto c = coords + I * length * dimension;
Point<dimension> p;
for (Int l = 0; l < length; ++l) {
for (Int j = 0; j < dimension; ++j)
p[j] = c[j];
c += dimension;
sg.mp[p] = l;
}
}
return;
}
if (mode <= 2) {
// Modes 1 (overwrite) / 2 (keep): one input row per output site.
// Compile list of how input rows correspond to output rows,
// independently per sample so the loop can run in parallel.
std::vector<std::vector<Int>> outputRows(batchSize);
std::vector<Int> nActives(batchSize);
#pragma omp parallel for private(I)
for (I = 0; I < batchSize; I++) {
auto &sg = SGs[I];
auto &ors = outputRows[I];
auto &nAct = nActives[I];
auto c = coords + I * length * dimension;
Int i = I * length;
Point<dimension> p;
if (mode == 1) {
for (Int l = 0; l < length; ++l, ++i) {
for (Int j = 0; j < dimension; ++j)
p[j] = *c++;
if (p[0] >= 0) { // negative first coordinate == padding
if (sg.mp.insert(make_pair(p, nAct)).second) {
nAct++;
ors.push_back(i);
} else {
ors[sg.mp[p]] = i; // overwrite: later row wins
}
}
}
}
if (mode == 2) {
for (Int l = 0; l < length; ++l, ++i) {
for (Int j = 0; j < dimension; ++j)
p[j] = *c++;
if (p[0] >= 0) {
if (sg.mp.insert(make_pair(p, nAct)).second) {
nAct++;
ors.push_back(i); // keep: first row wins
}
}
}
}
}
// Sequential prefix sum of per-sample counts -> global offsets.
for (I = 0; I < batchSize; I++) {
SGs[I].ctr = nActive;
nActive += nActives[I];
}
Int maxActive = 1;
rules.resize(2);
rules[0].push_back(mode);
rules[0].push_back(maxActive);
rules[0].push_back(batchSize);
rules[0].push_back(length);
rules[0].push_back(nActive);
auto &rule = rules[1];
rule.resize(2 * nActive);
// Scatter each sample's rules into its slot; disjoint slices, so safe
// to do in parallel.
#pragma omp parallel for private(I)
for (I = 0; I < batchSize; I++) {
auto &ors = outputRows[I];
auto rr = &rule[SGs[I].ctr * 2];
for (auto &row : ors) {
rr[0] = 1;
rr[1] = row;
rr += 2;
}
}
return;
}
if (mode == 3 or mode == 4) {
// Modes 3 (sum) / 4 (mean): each output site gathers all its input rows.
// Compile list of how input rows correspond to output rows
std::vector<std::vector<std::vector<Int>>> outputRows(batchSize);
std::vector<Int> nActives(batchSize);
#pragma omp parallel for private(I)
for (I = 0; I < batchSize; I++) {
auto &sg = SGs[I];
auto &ors = outputRows[I];
auto &nAct = nActives[I];
auto c = coords + I * length * dimension;
Int i = I * length;
Point<dimension> p;
for (Int l = 0; l < length; ++l, ++i) {
for (Int j = 0; j < dimension; ++j)
p[j] = *c++;
if (p[0] >= 0) {
if (sg.mp.insert(make_pair(p, nAct)).second) {
nAct++;
ors.resize(nAct);
}
ors[sg.mp[p]].push_back(i);
}
}
}
// Prefix sum of per-sample counts -> global offsets.
for (I = 0; I < batchSize; I++) {
SGs[I].ctr = nActive;
nActive += nActives[I];
}
Int maxActive = 1;
if (mode >= 3)
for (auto &ors : outputRows)
for (auto &row : ors)
maxActive = std::max(maxActive, (Int)row.size());
rules.resize(2);
rules[0].push_back(mode);
rules[0].push_back(maxActive);
rules[0].push_back(batchSize);
rules[0].push_back(length);
rules[0].push_back(nActive);
auto &rule = rules[1];
// Fixed stride of (1 + maxActive) per output row; unused tail entries
// stay zero from resize().
rule.resize((maxActive + 1) * nActive);
#pragma omp parallel for private(I)
for (I = 0; I < batchSize; I++) {
auto &ors = outputRows[I];
auto rr = &rule[SGs[I].ctr * (maxActive + 1)];
for (auto &row : ors) {
rr[0] = row.size();
for (Int i = 0; i < (Int)row.size(); ++i)
rr[i + 1] = row[i];
rr += 1 + maxActive;
}
}
}
}
#endif /* INPUTLAYER_H */
|
GB_unop__asin_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__asin_fp64_fp64)
// op(A') function: GB (_unop_tran__asin_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = asin (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = asin (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = asin (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASIN || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__asin_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = asin (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = asin (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = asin (A'): transpose A while applying the operator. The entire kernel
// body is generated by GB_unop_transpose.c, which is specialized through the
// GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__asin_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
patch-src_algo_blast_core_blast__kappa.c | $NetBSD: patch-src_algo_blast_core_blast__kappa.c,v 1.2 2019/01/07 15:00:10 bacon Exp $
# Work around NetBSD's definition of stderr (a macro expanding to &__sF[2]), which breaks the OpenMP shared() clause below
--- src/algo/blast/core/blast_kappa.c.orig 2019-01-02 04:00:17.000000000 +0000
+++ src/algo/blast/core/blast_kappa.c
@@ -32,6 +32,7 @@
* system for each match in blastpgp
*/
+#include <stdio.h>
#include <float.h>
#include <algo/blast/core/ncbi_math.h>
#include <algo/blast/core/blast_hits.h>
@@ -57,7 +58,7 @@
* shared(...) list. */
# define STDERR_COMMA
# else
-# define STDERR_COMMA stderr,
+# define STDERR_COMMA my_stderr,
# endif
#endif
@@ -3412,6 +3413,8 @@ Blast_RedoAlignmentCore_MT(EBlastProgram
}
Boolean interrupt = FALSE;
+ /* NetBSD defines stderr as (&__sF[2]), which the macro below chokes on */
+ FILE *my_stderr = stderr;
#pragma omp parallel \
default(none) num_threads(actual_num_threads) \
if(actual_num_threads>1) \
|
1.c | #include <stdio.h>
/*
 * Minimal OpenMP demo: each thread in the parallel team prints a greeting,
 * then the initial thread prints a farewell after the team is destroyed.
 * Fix: use the prototype form `int main(void)` and an explicit return.
 */
int main(void)
{
#pragma omp parallel
  {
    /* Runs once per thread; output order and interleaving are
     * nondeterministic. */
    printf(" Hello ");
  }
  /* Implicit barrier at the end of the parallel region: this line runs
   * only after every thread has printed. */
  printf("\n\n GoodBye – Team Destroyed – Exiting Program \n\n");
  return 0;
}
|
bicg.c | /**
* bicg.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
#define NX SIZE
#define NY SIZE
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Initialize the inputs of the BiCG kernel:
 *   r[i]        = i * pi            (length NX)
 *   p[j]        = j * pi            (length NY)
 *   A[i][j]     = (i * j) / NX      (NX x NY, row-major)
 */
void init_array(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *r) {
  for (int row = 0; row < NX; row++) {
    r[row] = row * M_PI;
    DATA_TYPE *A_row = &A[row * NY];
    for (int col = 0; col < NY; col++)
      A_row[col] = ((DATA_TYPE)row * col) / NX;
  }
  for (int k = 0; k < NY; k++)
    p[k] = k * M_PI;
}
/* Count the elements of q (length NX) and s (length NY) whose reference and
 * GPU values differ by more than ERROR_THRESHOLD percent. Returns the total
 * number of mismatches (0 means the results agree). */
int compareResults(DATA_TYPE *s, DATA_TYPE *s_outputFromGpu, DATA_TYPE *q,
                   DATA_TYPE *q_outputFromGpu) {
  int fail = 0;
  /* q vs q_outputFromGpu */
  for (int k = 0; k < NX; k++)
    if (percentDiff(q[k], q_outputFromGpu[k]) > ERROR_THRESHOLD)
      fail++;
  /* s vs s_outputFromGpu */
  for (int k = 0; k < NY; k++)
    if (percentDiff(s[k], s_outputFromGpu[k]) > ERROR_THRESHOLD)
      fail++;
  return fail;
}
/* Sequential reference BiCG sub-kernel:
 *   s = A^T * r   (length NY)
 *   q = A  * p    (length NX)
 * Both products are accumulated in a single sweep over the rows of A. */
void bicg(DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s, DATA_TYPE *p,
          DATA_TYPE *q) {
  for (int j = 0; j < NY; j++)
    s[j] = 0.0;
  for (int i = 0; i < NX; i++) {
    const DATA_TYPE *row = &A[i * NY];
    q[i] = 0.0;
    for (int j = 0; j < NY; j++) {
      s[j] = s[j] + r[i] * row[j];
      q[i] = q[i] + row[j] * p[j];
    }
  }
}
/* Offloaded BiCG sub-kernel: computes s = A^T * r and q = A * p inside a
 * single `target teams` region. The s-update is parallelized over columns j
 * (each thread owns one s[j], avoiding the race a row-parallel version would
 * have); the q-update is parallelized over rows i. LLVM_MCA_* are analysis
 * markers from BenchmarksUtil.h around the measured kernel. */
void bicg_OMP(DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s, DATA_TYPE *p,
DATA_TYPE *q) {
int i, j;
/* s is zeroed on the host and mapped tofrom, so the device accumulates
 * into the zeroed copy. */
for (i = 0; i < NY; i++) {
s[i] = 0.0;
}
#pragma omp target teams map(to : A[ : NX *NY], p[ : NY], r[ : NX]) map(tofrom : s[ : NY], q[ : NX]) device(OMP_DEVICE_ID)
{
/* j is the workshared loop variable (implicitly private); the inner
 * index i must be declared private explicitly. */
#pragma omp distribute parallel for private(i)
for (j = 0; j < NY; j++) {
LLVM_MCA_BEGIN("kernel");
for (i = 0; i < NX; i++) {
s[j] = s[j] + r[i] * A[i * NY + j];
}
LLVM_MCA_END("kernel");
}
/* Second loop: q = A * p, one row per iteration. */
#pragma omp distribute parallel for private(j)
for (i = 0; i < NX; i++) {
q[i] = 0.0;
for (j = 0; j < NY; j++) {
q[i] = q[i] + A[i * NY + j] * p[j];
}
}
}
}
/* Driver: allocates inputs, runs the offloaded and/or sequential BiCG
 * kernels depending on compile-time flags, optionally compares them, and
 * returns the mismatch count as the exit status. */
int main(int argc, char **argv) {
fprintf(stdout, "<< BiCG Sub Kernel of BiCGStab Linear Solver >>\n");
// declare arrays and allocate memory
// NOTE(review): malloc results are not checked before use — TODO confirm
// this is acceptable for the benchmark harness.
DATA_TYPE *A = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE));
DATA_TYPE *r = (DATA_TYPE *)malloc(NX * sizeof(DATA_TYPE));
DATA_TYPE *p = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE));
DATA_TYPE *s = NULL;
DATA_TYPE *s_OMP = NULL;
DATA_TYPE *q = NULL;
DATA_TYPE *q_OMP = NULL;
// initialize arrays
init_array(A, p, r);
// run OMP on GPU or CPU if enabled
#if defined(RUN_OMP_GPU) || defined(RUN_OMP_CPU)
s_OMP = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE));
q_OMP = (DATA_TYPE *)malloc(NX * sizeof(DATA_TYPE));
BENCHMARK_OMP(bicg_OMP(A, r, s_OMP, p, q_OMP));
// prevent dead code elimination
DCE_PREVENT(s_OMP, NY);
DCE_PREVENT(q_OMP, NX);
#endif
// run sequential version if enabled
#ifdef RUN_CPU_SEQ
s = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE));
q = (DATA_TYPE *)malloc(NX * sizeof(DATA_TYPE));
BENCHMARK_CPU(bicg(A, r, s, p, q));
// prevent dead code elimination
DCE_PREVENT(s, NY);
DCE_PREVENT(q, NX);
#endif
int fail = 0;
// if test mode enabled, compare the results
// NOTE(review): RUN_TEST only makes sense when both an OMP variant and
// RUN_CPU_SEQ are enabled; otherwise s/q (or s_OMP/q_OMP) are still NULL
// here and compareResults dereferences NULL — verify the build flags.
#ifdef RUN_TEST
fail = compareResults(s, s_OMP, q, q_OMP);
printf("Errors on OMP (threshold %4.2lf): %d\n", ERROR_THRESHOLD, fail);
#endif
// Release memory (free(NULL) is a no-op, so the unused variants are safe)
free(A);
free(r);
free(s);
free(p);
free(q);
free(s_OMP);
free(q_OMP);
return fail;
}
|
NMTreeNeighbourIndex.h | #pragma once
#include <array>
#include <map>
#include "NMTree.h"
//This class indexes all leaves.
//the basic idea is from this paper
//https://pdfs.semanticscholar.org/cec7/b45bca54d2ce5424bf9e7c61e954153f1ce0.pdf
//Cardinal Neighbor Quadtree: a New Quadtree-based Structure for Constant - Time Neighbor Finding
//Safwan W. Qasem, Ameur A. Touir
//because of lazyness it works a little bit differernt
//for greater and equal sized neighbours one neighbour is saved
//for smaller and therefor more neighbours the neighbour with the same size is saved
//on request of all neighbours, when they are smaller, the non leaf node is expanded to the correct border
namespace Iyathuum {
template <typename Content, size_t ArraySize, size_t Dimension, TreeMergeBehavior Merge, typename Scalar = size_t>
class NMTreeNeighbourIndex {
  using Tree = NMTree<Content, ArraySize, Dimension, Merge, Scalar>;

public:
  // Indexes the leaves of the tree rooted at `root`. Call init() before
  // querying neighbours.
  NMTreeNeighbourIndex(Tree* root) {
    _root = root;
  }

  // Precompute, for every leaf, one representative neighbour per
  // (dimension, direction). Leaves are processed in batches so the searches
  // can run in parallel; per-batch results are merged into the (non
  // thread-safe) _neighbourhood map sequentially afterwards.
  void init() {
    auto leafs = _root->getLeafs();
    int batchSize = 1000;
    int64_t numberOfBatches = (leafs.size() / batchSize) + 1;
    std::vector<std::vector<Neighbourhood>> result;
    result.resize(numberOfBatches);
    // get one neighbour per dimension/direction for each leaf.
    // BUG FIX: this loop previously iterated `batch < batchSize` (a fixed
    // 1000 iterations) instead of over the actual number of batches, so any
    // tree with more than batchSize*batchSize leaves was silently truncated.
#pragma omp parallel for
    for (int64_t batch = 0; batch < numberOfBatches; batch++) {
      for (size_t i = batchSize * batch; i < batchSize * (batch + 1) && i < leafs.size(); i++) {
        Neighbourhood n;
        n._target = leafs[i].link;
        for (size_t dim = 0; dim < Dimension; dim++) {
          for (auto dir : { NMTreeDirection::Negative, NMTreeDirection::Positive }) {
            n.get(dim, dir) = getNeighbour(n._target, dim, dir);
          }
        }
        result[batch].push_back(n);
      }
    }
    // merge everything into the shared map (single-threaded on purpose)
    for (size_t batch = 0; batch < result.size(); batch++) {
      for (size_t i = 0; i < result[batch].size(); i++)
        _neighbourhood[result[batch][i]._target] = result[batch][i];
    }
  }

  // All leaf neighbours of `node` over every dimension and both directions.
  std::set<Tree*> getAllNeighbours(Tree* node) {
    std::set<Tree*> result;
    for (size_t dimension = 0; dimension < Dimension; dimension++)
      for (auto dir : { NMTreeDirection::Negative, NMTreeDirection::Positive }) {
        std::set<Tree*> subresult = getNeighbours(node, dimension, dir);
        result.insert(subresult.begin(), subresult.end());
      }
    return result;
  }

  // Leaf neighbours of `node` on one side. The index stores a single
  // representative; if that representative is an inner node, it is expanded
  // here down to the leaves on the border facing `node`.
  std::set<Tree*> getNeighbours(Tree* node, size_t dimension, NMTreeDirection dir) {
    Tree* first = _neighbourhood[node].get(dimension, dir);
    if (first == nullptr)
      return {};
    else if (first->isLeaf())
      return { first };
    else {
      // expand border slices: replace every inner node by the children on
      // the side facing back towards `node` until only leaves remain.
      std::vector<Tree*> frontier;
      frontier.push_back(first);
      size_t currentPosition = 0;
      while (currentPosition < frontier.size()) {
        if (!frontier[currentPosition]->isLeaf()) {
          Tree* current = frontier[currentPosition];
          frontier.erase(frontier.begin() + currentPosition);
          std::set<Tree*> side = current->getSide(dimension, (dir == NMTreeDirection::Negative) ? NMTreeDirection::Positive : NMTreeDirection::Negative);
          frontier.insert(frontier.begin(), side.begin(), side.end());
          continue;
        }
        currentPosition++;
      }
      return std::set<Tree*>(frontier.begin(), frontier.end());
    }
  }

private:
  // Walk the neighbour path from the root; returns the deepest node covering
  // the neighbour position (a leaf, or an inner node of the neighbour's
  // size — inner nodes are expanded lazily in getNeighbours to save memory).
  Tree* getNeighbour(Tree* node, size_t dimension, NMTreeDirection dir) {
    Tree* root = nullptr;
    std::vector<std::array<size_t, Dimension>> pathToNeighbour = getPathToNeighbour(node, dimension, dir, root);
    if (pathToNeighbour.size() == 0) return nullptr;
    Tree* current = root;
    // path is stored leaf-to-root, so walk it backwards
    for (int64_t i = (int64_t)(pathToNeighbour.size() - 1); i >= 0; i--) {
      if (current->isLeaf()) {
        return current;
      }
      else {
        Tree* next = current->getChild(pathToNeighbour[i]);
        current = next;
      }
    }
    // non-leaf results are expanded to real neighbours in getNeighbours
    return current;
  }

  // Modify the node's own root path so that it points to the equally-sized
  // neighbour in direction `dir` along `dimension` (carry/borrow propagates
  // up through the path). Returns an empty path if no neighbour exists
  // (node touches the tree border).
  std::vector<std::array<size_t, Dimension>> getPathToNeighbour(Tree* node, size_t dimension, Iyathuum::NMTreeDirection dir, Tree*& root) {
    std::vector<std::array<size_t, Dimension>> input;
    getOwnPath(node, input, root);
    std::vector<std::array<size_t, Dimension>> result;
    for (size_t currentPosition = 0; currentPosition < input.size(); currentPosition++) {
      if (input[currentPosition][dimension] == 0 && dir == NMTreeDirection::Negative) {
        // at the low edge of this cell: wrap and borrow from the parent
        std::array<size_t, Dimension> newPath = input[currentPosition];
        newPath[dimension] = ArraySize - 1;
        result.push_back(newPath);
      }
      else if (input[currentPosition][dimension] == ArraySize - 1 && dir == NMTreeDirection::Positive) {
        // at the high edge: wrap and carry into the parent
        std::array<size_t, Dimension> newPath = input[currentPosition];
        newPath[dimension] = 0;
        result.push_back(newPath);
      }
      else {
        // step sideways here; the rest of the path is unchanged
        std::array<size_t, Dimension> newPath = input[currentPosition];
        newPath[dimension] += (dir == NMTreeDirection::Positive) ? 1 : -1;
        result.push_back(newPath);
        for (size_t p = currentPosition + 1; p < input.size(); p++)
          result.push_back(input[p]);
        return result;
      }
    }
    return {}; // no path found: neighbour would lie outside the tree
  }

  // Collects the child-index path from `node` up to the root (leaf-first
  // order) and reports the root via the out-parameter.
  void getOwnPath(Tree* node, std::vector<std::array<size_t, Dimension>>& path, Tree*& root) {
    if (!node->getParent()) {
      root = node;
      return;
    }
    std::array<size_t, Dimension> myPos = node->getPosition();
    for (size_t i = 0; i < Dimension; i++) myPos[i] = (myPos[i] - node->getParent()->getPosition()[i]) / node->getSize();
    path.push_back(myPos);
    getOwnPath(node->getParent(), path, root);
  }

  // Per-leaf record of one representative neighbour per side, laid out as
  // dim0-, dim0+, dim1-, dim1+, ... (x-, x+, y-, y+, z-, z+, ...).
  struct Neighbourhood {
    Tree*& get(size_t dimension, NMTreeDirection dir) {
      size_t index = 2 * dimension + ((dir == NMTreeDirection::Negative) ? 0 : 1);
      return _neighbourhood[index];
    }
    std::array<Tree*, 2 * Dimension> _neighbourhood;
    Tree* _target;
    Neighbourhood() {
      for (size_t i = 0; i < 2 * Dimension; i++)
        _neighbourhood[i] = nullptr;
    }
  };

  std::map<Tree*, Neighbourhood> _neighbourhood;
  Tree* _root;
};
} |
pr35625.c | /* PR libgomp/35625 */
/* { dg-do run } */
/* { dg-options "-std=c99" } */
/* Regression test for PR libgomp/35625: `schedule (guided, 10)` on
 * worksharing loops with non-unit strides, in both the increasing and
 * decreasing directions. The loop bodies are intentionally empty — the
 * test only checks that the runtime's guided chunk assignment terminates
 * correctly for strided iteration spaces. */
int
main (void)
{
#pragma omp parallel
{
#pragma omp for schedule (guided, 10)
for (int i = 0; i < 1826; i += 10)
;
#pragma omp for schedule (guided, 10)
for (int i = 0; i > -1826; i -= 10)
;
}
return 0;
}
|
GB_unaryop__identity_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp32_fp32
// op(A') function: GB_tran__identity_fp32_fp32
// C type: float
// A type: float
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (float) Ax [p] for all anz entries, in parallel. For this
// identity_fp32_fp32 specialization GB_CAST_OP reduces to a plain copy
// (the macros above define the cast and operator as identity on float).
GrB_Info GB_unop__identity_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (float) A': transpose with typecast/apply. The kernel body is the
// shared template GB_unaryop_transpose.c, specialized via the GB_* macros
// defined above; GB_PHASE_2_OF_2 selects the numerical (second) phase.
GrB_Info GB_tran__identity_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__isfinite_bool_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isfinite_bool_fp64)
// op(A') function: GB (_unop_tran__isfinite_bool_fp64)
// C type: bool
// A type: double
// cast: double cij = (aij)
// unaryop: cij = isfinite (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = isfinite (x) ;
// casting
#define GB_CAST(z, aij) \
double z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (aij) ; \
Cx [pC] = isfinite (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = isfinite (Ax [p]) for every entry, handling both the full
// case (Ab == NULL) and the bitmap case (Ab marks which entries exist).
GrB_Info GB (_unop_apply__isfinite_bool_fp64)
(
    bool *Cx,                   // output array; Cx and Ax may be aliased
    const double *Ax,           // input array of size anz
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL if A is full
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    if (Ab == NULL)
    {
        // full case: every position holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isfinite (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isfinite (z) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, cast double to double, and apply
// isfinite. The loop body is provided by GB_unop_transpose.c, which is
// specialized through the GB_* macros defined above.
GrB_Info GB (_unop_tran__isfinite_bool_fp64)
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, read-only
    int64_t *restrict *Workspaces,      // workspace (see GB_unop_transpose.c)
    const int64_t *restrict A_slice,    // how A is sliced across tasks
    int nworkspaces,                    // number of workspaces
    int nthreads                        // number of threads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
pfx_fmt_plug.c | /* pfx cracker patch for JtR. Hacked together during June of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2021, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Generating pfx files:
*
* keytool -genkeypair -alias my_certificate -keystore my_keystore.pfx -storepass
* my_password -validity 365 -keyalg RSA -keysize 2048 -storetype pkcs12 */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pfx;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pfx);
#else
#include <openssl/opensslv.h>
#include <openssl/crypto.h>
#include <openssl/ssl.h>
#include <openssl/bio.h>
#include <openssl/err.h>
#include <openssl/crypto.h>
#include <openssl/pkcs12.h>
#include <openssl/ssl.h>
#undef MEM_FREE
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 2 // tuned on core i7
#endif
#include <string.h>
#include "arch.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "misc.h"
#include "memory.h"
#include "memdbg.h"
#define FORMAT_LABEL "PFX"
#define FORMAT_NAME "PKCS12 (.pfx, .p12)"
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1001
#define PLAINTEXT_LENGTH 32
#define BINARY_SIZE 0
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Candidate passwords, one NUL-terminated string per key slot */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* cracked[i] is nonzero when saved_key[i] verified the PKCS12 MAC;
 * any_cracked is the OR over all cracked[] flags (fast-path for cmp_all) */
static int any_cracked, *cracked;
static size_t cracked_size;
static struct custom_salt {
	int len;     /* length of the raw PKCS12 blob, in bytes */
	PKCS12 pfx;  /* shallow struct copy of the parsed PKCS12 (see get_salt) */
} *cur_salt;
static struct fmt_tests pfx_tests[] = {
{"$pfx$*2136*3080020103308006092a864886f70d010701a0802480048207ff3080308006092a864886f70d010701a0802480048203283082032430820320060b2a864886f70d010c0a0102a08202ad308202a93023060a2a864886f70d010c0103301504104b93bf4f13111410dfcf885201edb561020101048202800f288d8419204192f2bf3da2d819984b0223dc9a4c5e00b023004b800e0b250e3fc76dc85d30a2c5b24f8a9a91131530e8c9b0c7702bc45048e799bebec05984f8dbc5f4193a396d6d7cc4e3fbc8001c6a661e434bbdcf3995e0d9191f35a629fb60e6a3e959457cd8eb67c20238448aa54f17ab691da76679388b6b2232a87c3c91f02859103810c36ec43be9ebd571309805bd9d0bf484b91403dc83de02e9af92c611a254ef1f1022058e9bacf9838a6f6e842fbc90adb55f132668ca4fcd4997873aa5901e8e29df1f01a37afd3964eaa2c7ebeb49c51daab12a7c81298d698c625ee9f15d617d77f29292b86c0b31815c94e3d3b569fc7ef64fdd0c83970e339266fdab76f3e9ddd75a086d3f8fe54e52d7ad34bdb34778e755705c87846ba489578fd187ef40e869935444dc2b9b83b769ddf3a40e481e1e2d33946fa77072cd0151e40e6e55b9c949996e501a591a8b34c8cfb38c669cc90e3ac805ef275b0d9a3d123d649b01f7e296f5bdb229ab62e71008b5c4434e98c50d5d575eae862fbe02ac8ae9003eb8353569110618098830b939db2c9992bef8b8d236c7f0dc59de3f65618ef5e313f9c8eae9d701e44baffc9b909fedd3d4b7283d41bac86121d140ef6877b2db1c168cf05507e1c2c6457405621e34c6fb316429fab6773c7336f32edcb72108de6b339c63124d0f491c87420b0511ad3efcfcdf421e9154381eb164716988dd511eb936423503a5e83a68b87892df1c4d4cfebffa0e856bf70a768ccf34770a6e4bb4d1c7d894a09949af40a458698df7e94335d718ff23cb040e7fc85d62086bfc74bc27d29aee4b0a7d8df41070efa9e165b57ca610b7b49afd688e5160d7b0888a99b04d1397f0b68217f677b78526ea7eacbafc3160303906092a864886f70d010914312c1e2a0043006500720074006900660069006300610064006f00200069006d0070006f0072007400610064006f302306092a864886f70d01091531160414478a62a291a103023605dfdec512ed2b65f57108000000000000308006092a864886f70d010706a0803080020100308006092a864886f70d0107013023060a2a864886f70d010c010630150410af99ed8b3a334491f93aeef557445292020101a08004820458853f6b251cbc854fa17ab64a8b1bf2692f54d1309c7fdfdf6e2a6a0e158851d09b665858f938c8ec79dd2d5754d44c2a25bb5d8ae078d
20576418899adfe6cb5caeeaae18ea27e808c55cb9e2da0b9847991259fb0ffdaddb3d6e4ba5f9a93d7c0c1226c1b4620023c373c4ad006b80358b959099a3fe71668d4a36abda19659156b61dac0dc3e534ca40521f675b7b347db333a5ea41d06ffc5ab1dc29499b19825a2fa468b062c0a34893dd2fb2c623ef79802b7ab052d2a243431c01d483c8a524c6fcf0b3c929abd755d4291648302e8b8dab16864dcd16c4d35b464a510c39b1dd14c699ef8197515ce40d77404b223a05593a694bea4508926acd3b7bdacf3bfbb4a82e9a9046649bba3de4670ac8d67354ddbab948d16752be9e5f993fd9b9441b6277bf1a3badd4e92de126a8e40f507d67652710ceedd44bcd01a9b8a214b32c8943472c08d3e6fb051ca42d93ceb67e0056e8348bba31a84377121c7ea44bbda7dc02ce0738fb72636788bb4daa5007d2cb3f5709d8dfb4300e39a56d20282f65c834ba91a38672457d3d6b380918c783b37b42cf86ec5bfacead722f794fa597fa7f98a4ddd686898ad9a5e62301171bc7606fb48e0d10f52025f9aaf16ffc91c5a566a5df15c2fc65cee342a3a9e9e8d970b86771c0d5a540f19625bbd9985c561266da78a46b6cd31bf50735e87926ea702eb890c74b22f40837636bed76ce3fb233ee7bd51a0b90c75d9ae7c4fde52c4edf9a0e3943a09c965f7aa27cdbcd1828efc8e6bcbf1d5f9d896793e07991492fe007782d67cb9e9e963ca31efdd77ab94e6590c0fba5e5cf9bf1e8705b27fbc3e5a55cd38c7fd4728e8e3395e2b55962939cb43ba489984f832af201a1856c61561b77097f683733ca5f01ae51f21cdd2777eeba95ce7d0b7c92e16308fe2db0aca5299b904a8d6804de50e3ba19210267660ebb6e9e4fcf9edc574002250ea2ff1b6c3b6eaad6cbc8761b21b04db3bda3ae07e2de3c771239667fb86c55add6d93e3e84f37c0e90f81d2c4917b3014d04c046366ed4048c357f03f6a17bb14872d6965c58b15c22a3179aa1e38e0e0257ff52ab5fb0229c692ac5db11e21aed65ff698038d4c62d171d90f1b31232e5bc5185fc57c75543a0713e906792642164f74b2d840a112a41498030baf2975fd468eef5b36fd3663b05b985613fe0ef553882b7d599c3ddc4f8d47d4862ff6833918fdf506d30b51b730c4e7bbb3c3757d3dc3bf6ac6b604d496bf5e149d0b0d87f8dc0ca91eec77a707d9df278f4d932c42def034db887e0138fa1a42fb72f022864418c088954cfd10276bfa6c8d7380b8136053945e742be0f8c1cdf6be8b75ccaf28c5faf389f685ddefb37af74e1e8199ce72ea079de3ab04eaf78543103045f1b3b1632e1dd983ddd4508b9e8a32ede7b38170c84abdb33aa11067bfa5a3d595521130af3c3f6fb57cc0f5523a4ac3c6aa17fdc
bbe8bf7a3356a1d726282c975c93575724865b5cd8ec6809fed65ae01284ccf200a33b0087162a94b78ad8f9551728c7df2789702054e4a62c30408a740e3557ac8a0b700000000000000000000000000000000000030353021300906052b0e03021a0500041475552f348f6570c4d5d17867ac9cfaef14d7c1df0410545367d2571128f17aec366b395a944d0000", "usr0052"},
{"$pfx$*2604*30820a28020103308209e206092a864886f70d010701a08209d3048209cf308209cb3082057806092a864886f70d010701a082056904820565308205613082055d060b2a864886f70d010c0a0102a08204fa308204f63028060a2a864886f70d010c0103301a0414e9a49f4190a3084e02ceba2f049303750f6646da02020400048204c8cd40bb89c287b9fe70a88825e33a648c76aa1b35d93131d445e48943ee50ff8a0aee6a0483a289fbacf21290a8553e3414ea6bd6b305407d709bbaf915a99430c998d9ba68e71f4036d42feb386061d645433390658df91bd4e9750a39f9288f7cf8001e2adc8e4d7480f1a5e2d63799df20d9eb956f86b33330ec2c206b1ae47cf54d9cf2cdd970664c251e64cc725456e2c14506cfd7d9ff1d2894a50093fff4f29d5967a0f788ed707ade93cb3ad7e87d96dad844d2037f4d5e863ec5170c0f1d4514d752a266cd4db49b63c5d86646e54a68187ddc99b00499286f79e2e7c54e30d3a1b1f035d7113180d072c7218d399f8b5427dc2d6fcb42518bd6bb97f74c97ea2358ef39fb176397fe7729cd5c3a474423f0a0e74a91c77bb27b24f82463081fed53bdf216840b2c60482846010b654e2c74db4abfb07936e0cc9d0d133ac7a4baa03091de25f6eea70d85fe9376349731ecc03fe437175101fd6698929f43a94835c6453b68335f478cfa1fab1ddf0236570ca5a07cebf1aa3c36d7804654a5eac8328377abba3b81627fcac7f1dbdb56ba1f0f861af9967c5d245459a81891fb5dd833f0bca633eb616cf10397b295d857c63501e85fb9f11f1fd3dd80baac425ecf0efa012817ca9b23e06575a3942613fad67b4bda4fabfd29bd1897b0623d6d47ec000bd656f5b7c78b9a4808ac022524b17a8df676b86dc29b6d008d09cb1148110bd07464c071504d7dae5803602247da1e4cd5d490771322d7eb568d0ad0293f4d2626ac0f60f568a92eccd097f6d5247e043b7cdb52ddfef0516e7053fb42b7d1b16564f1c862c1bf45436290a5dab1f0e90b24bdd4433ce0cbcc7b0eafc445dcc6fe8a52e606d3977ce6d9e44f037ea8dbf36bce63a877aaafde13b1bb5005856d315f30fd4feaf26ef8eeef899802aa2442364c147b074c64878a696a1f2cadd9bacb187b62c239c16f163d6c44e157dd8daa4610142eb40dadbc3405c4ade7d127db20bc4384bd1d4c2a2a5dc907aa0468c2485654bceeee3d4011d74e6e85ed88811ccf1cd6b3d5540c5709b8e14fb9e610b552502343ec739e8c9c6d6459062f76275de1fa1b24ed8a9924ea9176dfb89520b7fbec9e9968bd0320afc513e560966b524a82ef5a206f1823742e820bbbe6dca6b0a33c8f04208376bfd01f049f666c735b1efe2550a8601b1839bf045c56a97
72a3e25235d2fb61f9007713ff57ae47f6335a44e6730bdaaebe833996aaaa78138ddb7d8719570a429debb8183fbd07f71a037335ec5b1d40c62f7163b85dc71d8db536c9092f155429b65ea81f8ff3c7892ebf881c107ea2c167df47d044ae7ed3fb5328d673753450c82d7049dfeaf1dde821a0ee0d6676a1656584cdbd4532f8d2493ea4794d88acacb147f19ca15777a67fe5031991ebc45ea43e87574f9d2f52de0722d6cc7f5b7a378a461148f1f7c5ee8bc7c7ae4fe80b4eed13b35d16906a084120c645812db0bd70e419c004512f284ab7635f17ee2ecc728aef2cda256b86fb4cc9d3e21736249735962d6ccd307a67fdbdb0815184f116eb1747de19449c6fb9410cb669fa2a3f2ab5ca16c3cca918555b583f61f2126aa0895ccdac7a5604ca1e84a76c15c508d620bb9037e5e5acf97e94438a059bc771d84dc1f63fd3f4780274a2f0a03f9b09a0cf4638e0c317f6ebb24f9062fe8c7023d4c06f3c67c9ac2008e8da33150302b06092a864886f70d010914311e1e1c006d0079005f00630065007200740069006600690063006100740065302106092a864886f70d0109153114041254696d6520313334303937373036353139303082044b06092a864886f70d010706a082043c308204380201003082043106092a864886f70d0107013028060a2a864886f70d010c0106301a04147d79e2d2b2986ea4d929b3ba8b956739a393b00802020400808203f82c0ebc2a236e5ffc4dff9e02344449f642fdf3b16da9b2e56d5a5e35f323b23b8ff915fbaf2ff70705465170ccd259a70bb1cde9f76e593f9a7a0d4764806dad2fa5c3b1ee2711e9dbbcaa874f8985f1b6c2ca1d55c919cf9e88aababe7826107cdb937e7cca57809b20a6351504ab688327e4df957a3c167772cf66aed6a2007ead81896465d4931efe7c3291a49761f1428c766fd82e1736218e90d9f8592475d164d9a79f3424cb6a543f7040d3f0dba6996d496f4f603b7d59527e5c9c89b3f96c55fa73b72385629cbd606cf9f88833db66bb1519dee62a0cd4989d93457fa1162b594b86bc7134c9aa530fe10d62b914f1818395f82d5224c3bc793a04b0ab41dc98694535f5bfbf2aa943d6c794f407e02248be842c55789091d1cc28bbfdf86bc1346142b057558ce1e64e38f8b2d7d68d539150f3de23f43d59637ae678f3687e69b52fdf46f54c32b84a658a2a69fb16da7ebb45ea84c9e38d6cedfc1227b86a6ea3094d0908d588213834192849fa5c25b2460bb22fdd9d9e317efaca646ea582ecb50f6a466f55ae38573afe904eadf42b6c596c8740dbf92cbd38c347624f3399ac2d20d0727f897f38417901dfdaa798631af8992fcad5d708882576036531d2deb867fe46d63921dc50b8c73fb
c59586a861d7ae47c2a5ff892e9dffc6d8e6e8161506819ebc020cfb7bc4c1708832d53f8cc864012ab8379a1323e23b0edb5ffe48a942411cef6197f5545ae6822a3096db972f96d4d200ba600a1e95595d4532e7a9861b233f71ff37ea3c19143c87dd6d4a3f3186a7693dc11067c7b4c967984d4bbbf9d88acacb1ff3ba4536ea265a0503865d86af408748fe8191119cd7b570b5352f190265d5d468e911ba0020b526d3892119fda21243568cfa638251c9044c91a88d2f8a05dd0d90088b0b79ac2a2ca263aa108160a7f6943ce709a02743afb6e4ec9a7f7535635f839c2baf938418accec3d5c1ad2bbcec69ab337155bd0bb1b45c7e16e32f251d4da7796f013d6d502581853da6ab9736382115141886c14512fb5ca22e3e9e20366257579eb4225a6a3716457b9b1c0df63cb71a34b888de021f3520d62e96675ea8767e23d55b50e9aa40babafe398f5482c83f8caa57d7ed3486ce7dedace7158067194892defe38af28c1695cd6f14a1ddae959541fab3b59e72c17d2a67d980c749ef00b1f61ece68d81c79b4ec4f4d9eeaad43895a0dc9d86f4d7fe114f01189b3db72ee92963d4403c3aca8bf6d60ef7ee7fcd8102b3247048b4d517cd0ab76a0f8d68d33733934cb35a8e40d7de70c4f166c453fda74553069c51dd33f6f513bb9ef0a983187fc7d896c668590577a4e269688cc7b9fbd1f3fe77d3f431cf002043c43e1cae82b22018931f1337ee276d49c19163a866ef10a64ac5b013db1cb1c303d3021300906052b0e03021a05000414501f5cd8e454e44b6925715c4d2605a8d4ce70d00414456a2344e138862de7ad2e0b274952ef566e2b6302020400", "my_password"},
{"$pfx$*1702*308206a20201033082066806092a864886f70d010701a082065904820655308206513082032f06092a864886f70d010706a08203203082031c0201003082031506092a864886f70d010701301c060a2a864886f70d010c0103300e04086c933ea5111fd24602020800808202e83c56ad18c45e54aaca4d170750cfbfb3059d6cf161e49d379eab15e722216cb479eee8da7b6e6ffc89e01fbf30f4eb5e1b88ca146c166c700a68473d25a0979344cc60d1e58230a12d24b8be6e9174d3afecdf111cd7d96527831ac9c8f4bf3817cda021f34b61899f2a75fe511e8dedfb70367fa9902d2d3e500f853cc5a99ec8672a44713d24ae49382a20db6349bc48b23ad8d4be3aa31ba7e6d720590b5e4f6b0b5d84b7789ae9da7a80bfa3c27e507fc87e7bc943cff967db6b76f904ac52c1db5cfe9915fa3493cd42b8db6deae62bc01593e34bc8598f27a24cdfd242701ff72d997f959f3a933ab5a2762df33849c116715b78cb0d83267aff913619cbbdf003e13318e4b188a8a4f851b9f59ae2c71ab215c565f7872e5d54c06f92d6f59eaf19d95f9b4526d52d289cd17bc0c2079f9f13c20a70a566773d90ca6d888386d909b6362cb79e15cf547dceab1fe793c577b70f72463969f7b416fb5a6228053363558df18588b53406343ab320a1bbf1757b67ef8e3075f44dee4521f4a461d37ea894c940bc87f9bd33276f2843ff5922fd8e61d22a8619ad23154880fd7d957c0f151458fc4f686d96695a823b08c1795daaf79e41118a3c57ee065a693853a9c4b2004440662f51d63bb9973dc4bb8c541d424416c57d01a825be4d31dab7c7f4b2b738e4bbfdda1e3d3b95e026dadee4dfe155c0f4a24991693f679b452516bc19eab7cf7eb41b476358583d46630e8cda55974b8fcbe25b93e91e73f584a913149137c1c20d13f38826d8dba9bcf5504b8cee77e20a19d6fb050e9213b8aeb11c26a871c600701aea352ba2dcea15d8010d25034f64aa488b580b8282d226f8203bba6aa424b0a25bcceb9c7c718b6c276022d988ca063d2e88350d68903f95aa3265b44d909c07fa9477a5dfcfe3b5ed49b789d6e1c13aca012630343021dbc0c0f17dae6688eae495b76d21be49ced2c2e98e1068d8725d8a581958fb2530871dff1b3f910ae8beb3bc07bfb4b1d2d73fc5d440dc9bcd32ba656c32e357051bef3082031a06092a864886f70d010701a082030b0482030730820303308202ff060b2a864886f70d010c0a0102a08202a6308202a2301c060a2a864886f70d010c0103300e0408749558ace83617660202080004820280ef790b9cd427ec99a350a6e3afb1727cf3dd859d5377897805a7093e1ca42ab8cccc6c52d2b86d61ed55b5bd743fb2a4ec556b438933a
9d97a55e5ad1fb3f9967e550be3d708feb5c7287e31afed165a4a91bd5a80292a1e061f97a8c11339963843348badf3fd898e89fd92bda5ad0195d8d4f75e7bce9f0518eeb85365860cd32ad5cea0958efef02bfb74aec0af0765729dae079f5eb08b099d3b06a9b9c6cd6f1e1e4170208ebec3c61ae3421e90cef0f2b5cd2187e43cc4ceecf4aec06340f886efb94f517e578d13659392246a69505de3914b719fba74709ef0f03f010429f899dbddab950f6e58462b2fe2663986a5e0c8ff235e89bca3bb6e41fcd602a0277a83822ac1a14101c83fd1cafdc45c1980ecf54ef092deb2fea736b428158e0847256fc1211f94ea8075145be5a5fb26206e125d55f45500844f1a83f063d0be19b60427dadbd89109bb9ee31a1ac79c863204e8e80c044b8b6bc45c756c26be514e4270a293faf4608065a27b4a51253cb9f831614d5c7f25ec1d4e36063e68e4e405c1f4deb98a786c57a376609441f2dcbe6393487b884624570f6cbb02b53f58ea4acb0faedd2931293dc87664a0c589322480686f6613ffb794c3b3b1872cd7a418712a35666b53bd8383f2e7aa6e8a9e20dd3d46cc3aaaaf17841732dde708ba5611ebcc8777fb3f7b65f2cf95992fdf4f5a17ddf01f3ebe5fb6c9cd58cb74553865cbec3c9d391dcc3e96e654faf7be7fdc8d5fb5dff98799e740147d2ca4b6df47560a4a20bd8f30cf5b495f4e919c9efad3aa59491a3e2ba4e53606e2016ce13e8271e70ccd5b57eec99a8604caf5997e648f3eb541769267f9cdf76aa84917ebd8a1f60a973ed22cca9fa0d3589bb77dafed82ea4f8cd19d3146301f06092a864886f70d01091431121e10006f00700065006e00770061006c006c302306092a864886f70d01091531160414a38a6be4b090be5e29259879b75e0e482f4a4dd830313021300906052b0e03021a05000414a790274918578289d80aa9fd0d526923f7b8f4d40408e861d3357729c35f02020800", "openwall"},
{NULL}
};
struct fmt_main fmt_pfx;
/* One-time format initialization: set up the OpenSSL library and size the
 * per-candidate key/result buffers according to the OpenMP thread count. */
static void init(struct fmt_main *self)
{
	/* OpenSSL init, cleanup part is left to OS */
	SSL_load_error_strings();
	SSL_library_init();
	OpenSSL_add_all_algorithms();
#if defined(_OPENMP) && OPENSSL_VERSION_NUMBER >= 0x10000000
	/* compiled against 1.0+ but running an older runtime: older OpenSSL
	 * is not thread-safe here, so fall back to single-threaded mode */
	if (SSLeay() < 0x10000000) {
		fprintf(stderr, "Warning: compiled against OpenSSL 1.0+, "
		"but running with an older version -\n"
		"disabling OpenMP for pfx because of thread-safety issues "
		"of older OpenSSL\n");
		fmt_pfx.params.min_keys_per_crypt =
		fmt_pfx.params.max_keys_per_crypt = 1;
		fmt_pfx.params.flags &= ~FMT_OMP;
	}
	else {
		int omp_t = 1;
		omp_t = omp_get_max_threads();
		self->params.min_keys_per_crypt *= omp_t;
		/* extra slots per thread for better load balancing */
		omp_t *= OMP_SCALE;
		self->params.max_keys_per_crypt *= omp_t;
	}
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc_tiny(cracked_size, MEM_ALIGN_WORD);
}
/* Return nonzero when q consists solely of hex digits (per atoi16),
 * i.e. the scan reaches the terminating NUL without hitting 0x7F. */
static int ishex(char *q)
{
	const char *s = q;

	while (atoi16[ARCH_INDEX(*s)] != 0x7F)
		s++;
	return *s == '\0';
}
/* Return nonzero when q is the canonical decimal form of a non-negative
 * int: round-tripping through atoi/printf must reproduce q exactly, which
 * rejects leading zeros, signs, trailing junk, and out-of-range values. */
static int isdec(char *q)
{
	char canon[24];

	if (*q == '-')
		return 0;
	snprintf(canon, sizeof(canon), "%d", atoi(q));
	return strcmp(canon, q) == 0;
}
/* Validate a "$pfx$*len*hexdata" ciphertext line: check the field shape
 * and confirm the hex blob actually parses as a PKCS12 structure.
 * Returns 1 when valid, 0 otherwise.
 *
 * Fixes over the previous version: the tag check now covers all 6 bytes
 * that are skipped (an input of exactly "$pfx$" used to read past the
 * terminator), atoi() is only applied after isdec() validated the field,
 * non-positive lengths are rejected, and the PKCS12 object returned by
 * d2i_PKCS12_bio() is freed instead of leaked on the success path. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *p, *keeptr, *decoded = NULL;
	PKCS12 *p12 = NULL;
	BIO *bp = NULL;
	int len, i;

	/* check the full "$pfx$*" tag, including the '*' skipped below */
	if (strncmp(ciphertext, "$pfx$*", 6))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 6;
	if ((p = strtok(ctcopy, "*")) == NULL)	/* length */
		goto err;
	if (!isdec(p))	/* validate before converting */
		goto err;
	len = atoi(p);
	if (len <= 0)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* data */
		goto err;
	if (!ishex(p))
		goto err;
	if (strlen(p) != (size_t)len * 2)
		goto err;
	decoded = (char *) mem_alloc(len + 1);
	for (i = 0; i < len; i++)
		decoded[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	decoded[len] = 0;
	bp = BIO_new(BIO_s_mem());
	if (!bp)
		goto err;
	BIO_write(bp, decoded, len);
	if (!(p12 = d2i_PKCS12_bio(bp, NULL)))
		goto err;
	PKCS12_free(p12);	/* was leaked before */
	BIO_free(bp);
	MEM_FREE(decoded);
	MEM_FREE(keeptr);
	return 1;
err:
	if (bp) BIO_free(bp);
	MEM_FREE(decoded);
	MEM_FREE(keeptr);
	return 0;
}
/* Parse a ciphertext line into the static custom_salt: hex-decode the
 * PKCS12 blob, let OpenSSL parse it, and keep the parsed structure.
 * Returns a pointer to a static buffer (overwritten on each call). */
static void *get_salt(char *ciphertext)
{
	char *decoded_data;
	int i;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	static struct custom_salt cs;
	PKCS12 *p12 = NULL;
	BIO *bp;
	ctcopy += 6;	/* skip over "$pfx$*" */
	p = strtok(ctcopy, "*");
	cs.len = atoi(p);
	decoded_data = (char *) mem_alloc(cs.len + 1);
	p = strtok(NULL, "*");
	/* hex string -> raw bytes */
	for (i = 0; i < cs.len; i++)
		decoded_data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	decoded_data[cs.len] = 0;
	/* load decoded data into OpenSSL structures */
	bp = BIO_new(BIO_s_mem());
	if (!bp) {
		fprintf(stderr, "OpenSSL BIO allocation failure\n");
		exit(-2);
	}
	BIO_write(bp, decoded_data, cs.len);
	if(!(p12 = d2i_PKCS12_bio(bp, NULL))) {
		perror("Unable to create PKCS12 object from bio\n");
		exit(-3);
	}
	/* save custom_salt information */
	/* NOTE(review): this memset also wipes cs.len, which was set above;
	 * crypt_all() only reads cs.pfx, but confirm len is never needed. */
	memset(&cs, 0, sizeof(cs));
	/* NOTE(review): shallow struct copy of *p12 — its internal pointers
	 * are shared and the p12 wrapper itself is never freed; verify the
	 * intended lifetime before changing this. */
	memcpy(&cs.pfx, p12, sizeof(PKCS12));
	BIO_free(bp);
	MEM_FREE(decoded_data);
	MEM_FREE(keeptr);
	return (void *) &cs;
}
/* Select the salt used by subsequent crypt_all() calls. */
static void set_salt(void *salt_blob)
{
	cur_salt = (struct custom_salt *) salt_blob;
}
/* Store candidate password `key` in slot `index`, truncated to
 * PLAINTEXT_LENGTH bytes and always NUL-terminated. */
static void pfx_set_key(char *key, int index)
{
	size_t n = strlen(key);

	if (n > PLAINTEXT_LENGTH)
		n = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, n);
	saved_key[index][n] = 0;
}
/* Return the stored candidate password for the given key slot. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Verify the PKCS12 MAC of every queued candidate against the current
 * salt; record matches in cracked[] / any_cracked. Returns the number of
 * candidates processed.
 *
 * Fix: previously the `for` statement itself sat inside the
 * `#if defined(_OPENMP) && ...` guard, so builds without OpenMP (or with
 * an old OpenSSL runtime) executed the body exactly once with index 0 yet
 * still reported all `count` candidates as processed — keys 1..count-1
 * were silently skipped. The loop now always runs; only the pragma is
 * conditional. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
#if defined(_OPENMP) && OPENSSL_VERSION_NUMBER >= 0x10000000
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		/* -1 length: let OpenSSL take strlen() of the password */
		if (PKCS12_verify_mac(&cur_salt->pfx, saved_key[index], -1)) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}
/* Fast post-crypt check: nonzero when any candidate in the batch matched. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Did candidate `index` verify the PKCS12 MAC in crypt_all()? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* No binary value to compare; the MAC verification was already exact. */
static int cmp_exact(char *source, int index)
{
	return cracked[index];
}
/* Format descriptor wiring the pfx methods into the John the Ripper core. */
struct fmt_main fmt_pfx = {
	{
		/* static parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#if defined(_OPENMP) && OPENSSL_VERSION_NUMBER >= 0x10000000
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		pfx_tests
	}, {
		/* method table */
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		pfx_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_unop__ainv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_int8_int8)
// op(A') function: GB (_unop_tran__ainv_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the AINV (additive inverse) operator: Cx [p] = -Ax [p] for every
// entry, handling both the full case (Ab == NULL) and the bitmap case.
GrB_Info GB (_unop_apply__ainv_int8_int8)
(
    int8_t *Cx,                 // output array; Cx and Ax may be aliased
    const int8_t *Ax,           // input array of size anz
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL if A is full
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    if (Ab == NULL)
    {
        // full case: negate every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            int8_t z = aij ;
            Cx [p] = -z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            int8_t z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and negate each entry. The loop body is
// provided by GB_unop_transpose.c, specialized via the macros above.
GrB_Info GB (_unop_tran__ainv_int8_int8)
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, read-only
    int64_t *restrict *Workspaces,      // workspace (see GB_unop_transpose.c)
    const int64_t *restrict A_slice,    // how A is sliced across tasks
    int nworkspaces,                    // number of workspaces
    int nthreads                        // number of threads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
contract_graph.h | /******************************************************************************
* contract_graph.h
*
* Source of VieCut.
*
******************************************************************************
* Copyright (C) 2017-2018 Alexander Noe <alexander.noe@univie.ac.at>
*
* Published under the MIT license in the LICENSE file.
*****************************************************************************/
#pragma once
#include <algorithm>
#include <functional>
#include <memory>
#include <unordered_set>
#include <utility>
#include <vector>
#include "common/configuration.h"
#include "common/definitions.h"
#include "data-structures/definitions.h"
#include "data_structure/graph_access.h"
#include "data_structure/mutable_graph.h"
#include "parallel/data_structure/union_find.h"
#include "tlx/logger.hpp"
#include "tools/hash.h"
#include "tools/timer.h"
class contraction {
public:
static constexpr bool debug = false;
    // Unimplemented stub: logs and aborts the program when called.
    static graphAccessPtr deleteEdge(
        graphAccessPtr, EdgeID) {
        LOG1 << "DELETE EDGE NOT IMPLEMENTED YET";
        exit(2);
    }

    // Unimplemented stub: logs and aborts the program when called.
    static std::pair<graphAccessPtr, std::vector<NodeID> >
    contractEdge(graphAccessPtr,
                 std::vector<NodeID>,
                 EdgeID) {
        LOG1 << "CONTRACT EDGE NOT IMPLEMENTED YET";
        exit(2);
    }
static inline uint64_t get_uint64_from_pair(NodeID cluster_a,
NodeID cluster_b) {
if (cluster_a > cluster_b) {
std::swap(cluster_a, cluster_b);
}
return ((uint64_t)cluster_a << 32) | cluster_b;
}
static inline std::pair<NodeID, NodeID> get_pair_from_uint64(
uint64_t data) {
NodeID first = data >> 32;
NodeID second = data;
return std::make_pair(first, second);
}
    // Greedy local improvement on a contracted partition: for each small
    // block, search for one vertex whose removal lowers the block's cut
    // degree; if splitting it off beats target_mindeg, the vertex becomes
    // its own singleton block.
    // NOTE(review): the loop body mutates shared state (target_mindeg,
    // mapping, rev_mapping via erase/push_back) from inside an OpenMP
    // parallel for with no synchronization — this looks racy; confirm it
    // is intentional (e.g. tolerated as a heuristic) before relying on it.
    template <class GraphPtr>
    static void findTrivialCuts(GraphPtr G,
                                std::vector<NodeID>* m,
                                std::vector<std::vector<NodeID> >* rm,
                                NodeWeight target_mindeg) {
        // create non-const references for better syntax
        std::vector<NodeID>& mapping = *m;
        std::vector<std::vector<NodeID> >& rev_mapping = *rm;
        LOG << "target min degree: " << target_mindeg;
 #pragma omp parallel for schedule(dynamic, 1024)
        for (NodeID p = 0; p < rev_mapping.size(); ++p) {
            NodeID bestNode;        // only meaningful once improve was set
            NodeWeight improve = 0;
            NodeWeight node_degree = 0;
            NodeWeight block_degree = 0;
            // only scan small blocks (fewer than log2(n) vertices)
            if (rev_mapping[p].size() < std::log2(G->number_of_nodes())) {
                NodeID improve_idx;  // position of bestNode inside block p
                for (NodeID node = 0; node < rev_mapping[p].size(); ++node) {
                    NodeID vtx = rev_mapping[p][node];
                    for (EdgeID e : G->edges_of(vtx)) {
                        auto [t, w] = G->getEdge(vtx, e);
                        auto contracted_target = mapping[t];
                        if (contracted_target == p) {
                            // edge stays inside block p
                            node_degree += w;
                            continue;
                        }
                        node_degree -= w;
                        block_degree += w;
                    }
                    // keep the vertex with the best (lowest) balance
                    if (improve > node_degree) {
                        improve = node_degree;
                        bestNode = rev_mapping[p][node];
                        improve_idx = node;
                    }
                    node_degree = 0;
                }
                if (improve > 0 &&
                    block_degree + improve < target_mindeg &&
                    rev_mapping[p].size() > 1) {
                    // split bestNode off into its own singleton block
                    target_mindeg = block_degree + improve;
                    rev_mapping[p].erase(rev_mapping[p].begin() +
                                         improve_idx);
                    VIECUT_ASSERT_LT(bestNode, G->number_of_nodes());
                    rev_mapping.push_back({ bestNode });
                    mapping[bestNode] = rev_mapping.size() - 1;
                }
            }
        }
        LOG << "target min degree now: " << target_mindeg;
    }
    // contraction global_mincut for small number of nodes in constructed graph,
    // we assume a full mesh and remove nonexistent edges afterwards.
    // Block-to-block edge weights are accumulated into a dense
    // num_nodes x (num_nodes - 1) table (per-thread copies merged under a
    // critical section); only nonzero entries become edges in the result.
    static graphAccessPtr contractGraphFullMesh(
        graphAccessPtr G,
        const std::vector<NodeID>& mapping,   // vertex -> block id
        size_t num_nodes) {                   // number of blocks
        auto contracted = std::make_shared<graph_access>();
        // flat weight matrix without the diagonal: the entry for
        // (src, tgt) lives at src * (num_nodes - 1) + tgt - (tgt > src)
        std::vector<EdgeWeight> intermediate(num_nodes * (num_nodes - 1), 0);
 #pragma omp parallel
        {
            // thread-local accumulator avoids atomics in the hot loop
            std::vector<EdgeWeight> p_intermediate(
                num_nodes * (num_nodes - 1), 0);
 #pragma omp for schedule(dynamic, 1024)
            for (NodeID n = 0; n < G->number_of_nodes(); ++n) {
                NodeID src = mapping[n];
                for (EdgeID e : G->edges_of(n)) {
                    NodeID tgt = mapping[G->getEdgeTarget(e)];
                    if (tgt != src) {
                        EdgeID edge_id =
                            src * (num_nodes - 1) + tgt - (tgt > src);
                        p_intermediate[edge_id] += G->getEdgeWeight(e);
                    }
                }
            }
            // merge this thread's partial sums into the shared table
 #pragma omp critical
            {
                for (size_t i = 0; i < intermediate.size(); ++i) {
                    intermediate[i] += p_intermediate[i];
                }
            }
        }
        // count surviving edges so construction can be sized exactly
        EdgeID existing_edges = intermediate.size();
        for (auto e : intermediate) {
            if (e == 0)
                --existing_edges;
        }
        contracted->start_construction(num_nodes, existing_edges);
        for (size_t i = 0; i < num_nodes; ++i) {
            contracted->new_node();
            for (size_t j = 0; j < num_nodes; ++j) {
                if (i == j)
                    continue;
                EdgeID edge_id = i * (num_nodes - 1) + j - (j > i);
                if (intermediate[edge_id] > 0) {
                    EdgeID edge = contracted->new_edge(i, j);
                    contracted->setEdgeWeight(edge, intermediate[edge_id]);
                }
            }
        }
        contracted->finish_construction();
        return contracted;
    }
static mutableGraphPtr fromUnionFind(mutableGraphPtr G, union_find* uf,
bool copy = false) {
bool save_cut = configuration::getConfig()->save_cut;
std::vector<std::vector<NodeID> > reverse_mapping(uf->n());
std::vector<NodeID> part(G->number_of_nodes(), UNDEFINED_NODE);
std::vector<NodeID> mapping(G->n());
NodeID current_pid = 0;
for (NodeID n : G->nodes()) {
NodeID part_id = uf->Find(n);
if (part[part_id] == UNDEFINED_NODE) {
part[part_id] = current_pid++;
}
mapping[n] = part[part_id];
if (save_cut) {
G->setPartitionIndex(n, part[part_id]);
}
reverse_mapping[part[part_id]].push_back(
G->containedVertices(n)[0]);
}
return contractGraph(G, mapping, reverse_mapping, copy);
}
static mutableGraphPtr contractGraph(
mutableGraphPtr G,
const std::vector<NodeID>& mapping,
const std::vector<std::vector<NodeID> >& reverse_mapping,
bool copy = true) {
if (reverse_mapping.size() * 1.2 > G->n() || !copy) {
return contractGraphVtxset(G, mapping, reverse_mapping, copy);
} else {
return contractGraphSparse(G, mapping, reverse_mapping.size());
}
}
    // Contract every multi-vertex set in reverse_mapping into a single
    // node, operating directly on the mutable graph. With copy == true
    // the contraction runs on a copy of G and the final node positions
    // are written back into G's partition indices; otherwise G itself is
    // modified in place and returned.
    static mutableGraphPtr contractGraphVtxset(
        mutableGraphPtr G,
        const std::vector<NodeID>&,   // mapping unused by this variant
        const std::vector<std::vector<NodeID> >& reverse_mapping,
        bool copy) {
        mutableGraphPtr H;
        if (copy) {
            H = std::make_shared<mutable_graph>(*G);
        } else {
            H = G;
        }
        for (size_t i = 0; i < reverse_mapping.size(); ++i) {
            if (reverse_mapping[i].size() > 1) {
                // translate original ids to current positions in H —
                // presumably earlier contractions renumber vertices and
                // getCurrentPosition resolves that (confirm in mutable_graph)
                std::unordered_set<NodeID> vtx_to_ctr;
                for (auto v : reverse_mapping[i]) {
                    vtx_to_ctr.emplace(H->getCurrentPosition(v));
                }
                H->contractVertexSet(vtx_to_ctr);
            }
        }
        if (copy) {
            // record in G where each original vertex ended up in the copy
            for (size_t i = 0; i < reverse_mapping.size(); ++i) {
                for (auto v : reverse_mapping[i]) {
                    G->setPartitionIndex(v, H->getCurrentPosition(v));
                }
            }
            H->resetContainedvertices();
        }
        return H;
    }
    // graph_access variant of fromUnionFind: derive the block mapping and
    // its reverse from the union-find components, then contract.
    static graphAccessPtr fromUnionFind(
        graphAccessPtr G,
        union_find* uf,
        bool = false) {   // copy flag is unused for graph_access
        std::vector<std::vector<NodeID> > rev_mapping;
        const bool save_cut = configuration::getConfig()->save_cut;
        std::vector<NodeID> mapping(G->number_of_nodes());
        std::vector<NodeID> part(G->number_of_nodes(), UNDEFINED_NODE);
        NodeID current_pid = 0;
        for (NodeID n : G->nodes()) {
            NodeID part_id = uf->Find(n);
            // first vertex of a component assigns it the next dense id
            if (part[part_id] == UNDEFINED_NODE) {
                part[part_id] = current_pid++;
                rev_mapping.emplace_back();
            }
            mapping[n] = part[part_id];
            if (save_cut) {
                G->setPartitionIndex(n, part[part_id]);
            }
            rev_mapping[part[part_id]].push_back(n);
        }
        return contractGraph(G, mapping, rev_mapping);
    }
// Chooses the graph_access contraction strategy: sparse hash-based
// contraction when there are many coarse vertices (> sqrt(n)), dense
// full-mesh contraction otherwise.
static graphAccessPtr
contractGraph(graphAccessPtr G,
              const std::vector<NodeID>& mapping,
              const std::vector<std::vector<NodeID> >& reverse_mapping) {
    const size_t coarse_n = reverse_mapping.size();
    if (coarse_n > std::sqrt(G->number_of_nodes())) {
        LOG << "SPARSE CONTRACT!";
        return contractGraphSparse(G, mapping, coarse_n);
    }
    LOG << "FULL MESH CONTRACT";
    return contractGraphFullMesh(G, mapping, coarse_n);
}
// altered version of KaHiPs matching contraction
// Contracts G into a coarse graph with 'num_nodes' vertices, where
// 'mapping' sends each fine vertex to its coarse block id.  Parallel
// edges between blocks are merged by accumulating their weights in a
// concurrent hash table (growt) keyed on the coarse endpoint pair.
template <class GraphPtr>
static GraphPtr
contractGraphSparse(GraphPtr G,
                    const std::vector<NodeID>& mapping,
                    size_t num_nodes) {
    // contested edge (both incident vertices have at least V/5 vertices)
    // compute value for this edge on every processor to allow parallelism
    timer t;
    EdgeID contested_edge = 0;
    NodeID block0 = 0;
    NodeID block1 = 0;
    if (G->m() * 0.02 < G->n() * G->n() && G->n() > 100) {
        // Count block sizes to find the two largest blocks; if the second
        // largest still holds more than a fifth of all vertices, the edge
        // between those two blocks would be heavily contended in the hash
        // table, so its weight is accumulated thread-locally instead.
        std::vector<uint32_t> el(num_nodes);
        for (size_t i = 0; i < mapping.size(); ++i) {
            ++el[mapping[i]];
        }
        std::vector<uint32_t> orig_el = el;
        // Partial sort so that el[0] and el[1] are the two largest counts.
        std::nth_element(el.begin(), el.begin() + 1, el.end(),
                         std::greater<uint32_t>());
        if (el[1] > G->number_of_nodes() / 5) {
            block0 = std::distance(orig_el.begin(),
                                   std::find(orig_el.begin(),
                                             orig_el.end(), el[0]));
            block1 = std::distance(orig_el.begin(),
                                   std::find(orig_el.begin(),
                                             orig_el.end(), el[1]));
            contested_edge = get_uint64_from_pair(block1, block0);
        }
    }
    EdgeWeight sumweight_contested = 0;
    auto coarser = std::make_shared<typename GraphPtr::element_type>();
    // NOTE(review): 'building_tool' and timer 't' appear unused in this
    // function body - candidates for removal (confirm against history).
    std::vector<std::vector<std::pair<PartitionID, EdgeWeight> > >
        building_tool(num_nodes);
    std::vector<size_t> degrees(num_nodes);
    growt::uaGrow<xxhash<uint64_t> > new_edges(1024 * 1024);
    t.restart();
    std::vector<size_t> cur_degrees(num_nodes);
    #pragma omp parallel
    {
        // Weight this thread saw for the contested edge; merged below.
        EdgeWeight contested_weight = 0;
        // Keys whose hash-table slot THIS thread created; only the creator
        // emits the edge later, so every coarse edge is emitted once.
        std::vector<uint64_t> my_keys;
        auto handle = new_edges.get_handle();
        #pragma omp for schedule(guided)
        for (NodeID n = 0; n < G->number_of_nodes(); ++n) {
            NodeID p = mapping[n];
            for (EdgeID e : G->edges_of(n)) {
                auto [tgt, wgt] = G->getEdge(n, e);
                NodeID contracted_target = mapping[tgt];
                if (contracted_target >= p) {
                    // self-loops are not in graph
                    // smaller do not need to be stored
                    // as their other side will be
                    continue;
                }
                uint64_t key = get_uint64_from_pair(p, contracted_target);
                if (key != contested_edge) {
                    // insert_or_update reports whether a new slot was
                    // created; the creating thread records ownership and
                    // bumps both endpoint degrees.
                    if (handle.insert_or_update(key, wgt,
                                                [](size_t& lhs,
                                                   const size_t& rhs) {
                                                    lhs += rhs;
                                                }, wgt).second) {
                        #pragma omp atomic
                        ++degrees[p];
                        #pragma omp atomic
                        ++degrees[contracted_target];
                        my_keys.push_back(key);
                    }
                } else {
                    contested_weight += wgt;
                }
            }
        }
        if (contested_edge > 0) {
            // Merge per-thread contested weights; after all threads have
            // contributed (barrier), a single thread inserts the edge once.
            #pragma omp critical
            {
                sumweight_contested += contested_weight;
            }
            #pragma omp barrier
            #pragma omp single
            {
                if (sumweight_contested > 0) {
                    handle.insert_or_update(contested_edge,
                                            sumweight_contested,
                                            [](size_t& lhs,
                                               const size_t& rhs) {
                                                lhs += rhs;
                                            }, sumweight_contested);
                    my_keys.push_back(contested_edge);
                    ++degrees[block0];
                    ++degrees[block1];
                }
            }
        }
        if constexpr (std::is_same<GraphPtr, graphAccessPtr>::value) {
            // CSR-style construction: one thread prefix-sums the degrees
            // into per-block edge offsets, then all threads fill edge
            // slots lock-free by claiming offsets with CAS.
            #pragma omp single
            {
                size_t num_edges = 0;
                coarser->start_construction(num_nodes, 0);
                for (size_t i = 0; i < degrees.size(); ++i) {
                    cur_degrees[i] = num_edges;
                    num_edges += degrees[i];
                    coarser->new_node_hacky(num_edges);
                }
                coarser->resize_m(num_edges);
            }
            for (auto edge_uint : my_keys) {
                auto edge = get_pair_from_uint64(edge_uint);
                auto edge_weight = (*handle.find(edge_uint)).second;
                size_t firstdeg, seconddeg;
                // Atomically claim one slot in each endpoint's edge range;
                // cur_degrees only ever increases, so the CAS retry loop
                // cannot suffer ABA.
                while (true) {
                    firstdeg = cur_degrees[edge.first];
                    size_t plusone = cur_degrees[edge.first] + 1;
                    if (__sync_bool_compare_and_swap(
                            &cur_degrees[edge.first], firstdeg, plusone))
                        break;
                }
                while (true) {
                    seconddeg = cur_degrees[edge.second];
                    size_t plusone = cur_degrees[edge.second] + 1;
                    if (__sync_bool_compare_and_swap(
                            &cur_degrees[edge.second], seconddeg, plusone))
                        break;
                }
                coarser->new_edge_and_reverse(
                    edge.first, edge.second, firstdeg,
                    seconddeg, edge_weight);
            }
        } else {
            // mutable_graph construction is not lock-free here: serialize
            // the edge insertion through a critical section.
            #pragma omp single
            coarser->start_construction(num_nodes);
            #pragma omp critical
            {
                for (auto k : my_keys) {
                    auto edge = get_pair_from_uint64(k);
                    auto wgt = (*handle.find(k)).second;
                    coarser->new_edge_order(edge.first, edge.second, wgt);
                }
            }
        }
    }
    coarser->finish_construction();
    return coarser;
}
// Contracts G into 'num_nodes' coarse vertices without a hash table:
// each thread merges the parallel edges of one coarse block at a time
// using a scratch array stamped with the block id, then the edges are
// emitted sequentially into the new graph_access.
static graphAccessPtr
contractGraphSparseNoHash(graphAccessPtr G,
                          const std::vector<NodeID>& mapping,
                          size_t num_nodes) {
    // Bucket the fine vertices by their coarse block id.
    std::vector<std::vector<NodeID> > rev_map;
    rev_map.resize(num_nodes);
    for (size_t i = 0; i < mapping.size(); ++i) {
        rev_map[mapping[i]].push_back(i);
    }
    auto contracted = std::make_shared<graph_access>();
    // edges[p] collects block p's merged (target, weight) adjacency list.
    std::vector<std::vector<std::pair<NodeID, EdgeWeight> > > edges;
    edges.resize(rev_map.size());
    #pragma omp parallel
    {
        #pragma omp single nowait
        {
            double average_degree =
                static_cast<double>(G->number_of_edges()) /
                static_cast<double>(G->number_of_nodes());
            EdgeID expected_edges = num_nodes * average_degree;
            // one worker can do this vector allocation while the others
            // build the contracted graph
            contracted->start_construction(num_nodes,
                                           std::min(G->number_of_edges(),
                                                    2 * expected_edges));
        }
        // first: coarse vertex which set this (to avoid total invalidation)
        // second: edge id in contracted graph
        std::vector<std::pair<NodeID, EdgeWeight> > edge_positions(
            num_nodes,
            std::make_pair(UNDEFINED_NODE, UNDEFINED_EDGE));
        std::vector<NodeID> non_null;
        #pragma omp for schedule(dynamic)
        for (NodeID p = 0; p < num_nodes; ++p) {
            for (NodeID node = 0; node < rev_map[p].size(); ++node) {
                for (EdgeID e : G->edges_of(rev_map[p][node])) {
                    NodeID contracted_target = mapping[G->getEdgeTarget(e)];
                    if (contracted_target == p)
                        continue;  // intra-block edge becomes a self-loop: drop
                    NodeID last_use =
                        edge_positions[contracted_target].first;
                    if (last_use == p) {
                        // Target already seen while processing block p:
                        // merge the parallel edge's weight.
                        edge_positions[contracted_target].second +=
                            G->getEdgeWeight(e);
                    } else {
                        // First edge from block p to this target: stamp the
                        // slot with p instead of clearing the whole array.
                        edge_positions[contracted_target].first = p;
                        edge_positions[contracted_target].second =
                            G->getEdgeWeight(e);
                        non_null.push_back(contracted_target);
                    }
                }
            }
            for (const auto& tgt : non_null) {
                edges[p].emplace_back(tgt, edge_positions[tgt].second);
            }
            non_null.clear();
        }
    }
    // Sequential emission keeps node/edge ids consistent in the builder.
    for (const auto& vec : edges) {
        NodeID n = contracted->new_node();
        for (const auto& e : vec) {
            EdgeID e_new = contracted->new_edge(n, e.first);
            contracted->setEdgeWeight(e_new, e.second);
        }
    }
    contracted->finish_construction();
    return contracted;
}
};
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/shear.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
  /* Permute the six external affine arguments sx,ry,rx,sy,tx,ty into the
     internal coefficient order c0,c2,c4,c1,c3,c5; slots 0 and 5 already
     coincide, so only indexes 1..4 move. */
  const double a1=affine[1];
  const double a2=affine[2];
  const double a3=affine[3];
  const double a4=affine[4];
  affine[1]=a2;
  affine[2]=a4;
  affine[3]=a1;
  affine[4]=a3;
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /* Inverse permutation of AffineArgsToCoefficients(): map the internal
     coefficient order c0,c1,c2,c3,c4,c5 back to the external argument
     order sx,ry,rx,sy,tx,ty; slots 0 and 5 stay in place. */
  const double c1=coeff[1];
  const double c2=coeff[2];
  const double c3=coeff[3];
  const double c4=coeff[4];
  coeff[1]=c3;
  coeff[2]=c1;
  coeff[3]=c4;
  coeff[4]=c2;
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /* From "Digital Image Warping" by George Wolberg, page 50 */
  /* Invert the six affine coefficients c0..c5 into 'inverse'.
     PerceptibleReciprocal() presumably guards against a (near-)zero
     determinant - see MagickCore gem/pixel-accessor for its contract. */
  const double det=
    PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  inverse[0]=coeff[4]*det;
  inverse[1]=(-coeff[1])*det;
  inverse[2]=(coeff[1]*coeff[5]-coeff[2]*coeff[4])*det;
  inverse[3]=(-coeff[3])*det;
  inverse[4]=coeff[0]*det;
  inverse[5]=(coeff[2]*coeff[3]-coeff[0]*coeff[5])*det;
}
static void InvertPerspectiveCoefficients(const double *coeff,
double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 53 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]);
inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]);
inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]);
inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
* the 2 number_valuesal polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
  /* Return the number of terms, (order+1)*(order+2)/2, of the 2d
     polynomial of the given order.  Valid orders are the integers 1 to 5
     and the special bilinear order 1.5; anything else returns 0.
     FIX: the non-integer test previously used a signed difference
     (order-1.5) > MagickEpsilon, which silently accepted any fractional
     order below 1.5 (e.g. 1.2); fabs() rejects everything but 1.5. */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon ) )
    return 0; /* invalid polynomial order */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /* Value of the n'th 2d polynomial basis term x^a * y^b at (x,y).
     Terms are ordered constant, affine, bilinear, quadratic, cubic,
     quartic, quintic - see the table above poly_number_terms(). */
  static const int xpow[21] =
    { 0, 1, 0, 1, 2, 0, 3, 2, 1, 0, 4, 3, 2, 1, 0, 5, 4, 3, 2, 1, 0 };
  static const int ypow[21] =
    { 0, 0, 1, 1, 0, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5 };
  double term;
  int k;
  if ( n < 0 || n > 20 )
    return( 0 ); /* should never happen */
  term=1.0;
  for (k=0; k < xpow[n]; k++)
    term*=x;
  for (k=0; k < ypow[n]; k++)
    term*=y;
  return( term );
}
static const char *poly_basis_str(ssize_t n)
{
  /* Expression-string suffix for the n'th polynomial basis term, used
     when printing the fitted polynomial ("ii"/"jj" are the x/y source
     coordinates).  Same term ordering as poly_basis_fn(). */
  static const char *const term_str[21] = {
    "",                                      /* constant */
    "*ii", "*jj",                            /* affine    order 1   */
    "*ii*jj",                                /* bilinear  order 1.5 */
    "*ii*ii", "*jj*jj",                      /* quadratic order 2   */
    "*ii*ii*ii", "*ii*ii*jj",
    "*ii*jj*jj", "*jj*jj*jj",                /* cubic     order 3   */
    "*ii*ii*ii*ii", "*ii*ii*ii*jj", "*ii*ii*jj*jj",
    "*ii*jj*jj*jj", "*jj*jj*jj*jj",          /* quartic   order 4   */
    "*ii*ii*ii*ii*ii", "*ii*ii*ii*ii*jj", "*ii*ii*ii*jj*jj",
    "*ii*ii*jj*jj*jj", "*ii*jj*jj*jj*jj",
    "*jj*jj*jj*jj*jj"                        /* quintic   order 5   */
  };
  if ( n < 0 || n > 20 )
    return( "UNKNOWN" ); /* should never happen */
  return( term_str[n] );
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /* x-derivative "direction" of the n'th polynomial basis term.  Note
     that the integer power factor is NOT included (term x^3 yields x*x,
     not 3*x*x), preserving this helper's established values. */
  static const int xpow[21] =
    { 0, 1, 0, 1, 2, 0, 3, 2, 1, 0, 4, 3, 2, 1, 0, 5, 4, 3, 2, 1, 0 };
  static const int ypow[21] =
    { 0, 0, 1, 1, 0, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5 };
  double term;
  int k;
  if ( n < 0 || n > 20 || xpow[n] == 0 )
    return( 0.0 ); /* no x dependence, or invalid term index */
  term=1.0;
  for (k=0; k < xpow[n]-1; k++)
    term*=x;
  for (k=0; k < ypow[n]; k++)
    term*=y;
  return( term );
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /* y-derivative "direction" of the n'th polynomial basis term (power
     factor omitted, as in poly_basis_dx).  Beyond the quadratic block
     the term ordering makes d/dy of term n equal d/dx of term n-1. */
  if ( n <= 2 )
    return( n == 2 ? 1.0 : 0.0 ); /* constant and affine terms */
  if ( n == 3 )
    return( x ); /* bilinear order = 1.5 terms = 4 */
  if ( n == 4 )
    return( 0.0 );
  if ( n == 5 )
    return( y ); /* quadratic order = 2 terms = 6 */
  return( poly_basis_dx(n-1,x,y) ); /* weird but true */
  /* NOTE: the only reason that last is not true for 'quadratic'
     is due to the re-arrangement of terms to allow for 'bilinear'
  */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  /* Forward the affine matrix to DistortImage() as an
     AffineProjectionDistortion with the six arguments sx,rx,ry,sy,tx,ty;
     the caller owns the returned image. */
  double
    arguments[6];
  Image
    *transformed_image;
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  arguments[0]=affine_matrix->sx;
  arguments[1]=affine_matrix->rx;
  arguments[2]=affine_matrix->ry;
  arguments[3]=affine_matrix->sy;
  arguments[4]=affine_matrix->tx;
  arguments[5]=affine_matrix->ty;
  transformed_image=DistortImage(image,AffineProjectionDistortion,6,
    arguments,MagickTrue,exception);
  return(transformed_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usually r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this, the method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortMethod *method,const size_t number_arguments,const double *arguments,
size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
register size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
cp_size, /* number floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP defintion involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficents depend on the given polynomal 'order' */
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadraterialDistortion:
number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coeff=1; /* The power factor to use */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usally 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y] +
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordients too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in there inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with a 2 number_values for Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficents */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a 2 number_valuesal Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existance! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separatable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaniously
+ Will only work with a 2 number_valuesal Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */
terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficents (forward mapping)
*/
if (number_arguments != 8) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image cocodinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficents but in the forward
direction, due to changes to indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algbra to work out the
reversed mapping formula, but resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y) / ( c1 - c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: techniqually coefficient c5 is not needed, anymore,
but kept for completness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficents are used to hole global polynomal information
c0 = Order of the polynimial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
register ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bootom edge to this radius (radial scaling)
By default, if the radii arguments are nor provided the image radius
is calculated so the horizontal center-line is fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 is the sanatized version first 6 input args
Coefficient 6 is the angle to coord ratio and visa-versa
Coefficient 7 is the radius to coord ratio and visa-versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usally 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, its a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* converstion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a clinder and flat plain from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
Coefficents 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficent values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coodinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* center of the image provided (image coodinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
case ShepardsDistortion:
{
/* Shepards Distortion input arguments are the coefficents!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resizes an image using the equivalent but slower image
% distortion operator.  The filter is applied using an EWA cylindrical
% resampling.  But like resize the final image size is limited to whole pixels
% with no effects by virtual-pixels on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without transparency.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image: an affine scale distortion from the source
    geometry to columns x rows, followed by a crop to whole pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  (void) ResetMagickMemory(distort_args,0,12*sizeof(double));
  /* affine scale arguments: 0,0 -> 0,0 plus width -> width, height -> height */
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  /* transparent virtual pixels so area-resampling fades edges correctly */
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      /*
        Image has no transparency channel, so we are free to use it for the
        edge handling, then turn it back off in the result.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,
        exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate the virtual-pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.

        First distort the alpha channel on its own.
      */
      Image
        *resize_alpha;

      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /* distort the actual image, containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        {
          /* fix: resize_alpha was previously leaked on this error path */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resize image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
      (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
  /*
    Clean up the results of the distortion: crop to the requested whole-pixel
    size and clear any page geometry the distortion introduced.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->alpha_trait=image->alpha_trait;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
%      Affine, Perspective, and Bilinear, do least squares fitting of the
%      distortion when more than the minimum number of control point pairs
%      are provided.
%
% Perspective, and Bilinear, fall back to a Affine distortion when less
% than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
%    o "verbose"
%        Output to stderr alternatives, internal coefficients, and FX
%        equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
%    o "distort:viewport"
%        Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
% Other settings that can effect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image, DistortMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
override some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
register ssize_t
i;
char image_gen[MagickPathExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MagickPathExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method) {
case AffineDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case PerspectiveDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr, "Perspective Projection:\n");
(void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '");
for (i=0; i<4; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for (; i<7; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n",
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n",
coeff[8] < 0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
(void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
0.5-coeff[3], 0.5-coeff[7]);
(void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if ( coeff[9] != 0 ) {
(void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",
-2*coeff[9], coeff[4], -coeff[0]);
(void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",
coeff[9]);
} else
(void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4], coeff[0]);
(void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",
-coeff[1], coeff[0], coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr, " (rt < 0 ) ? red : %s'\n", lookup);
else
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case BilinearReverseDistortion:
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",
coeff[0],(unsigned long) nterms);
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n yy =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n");
for ( i=0; i<5; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n",
coeff[1], coeff[4]);
(void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
-coeff[2], -coeff[3]);
(void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1], coeff[7] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] );
(void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] );
(void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] );
(void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n",
coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ double xc,yc;
/* NOTE: This does the barrel roll in pixel coords not image coords
** The internal distortion must do it in image coordinates,
** so that is what the center coeff (8,9) is given in.
*/
xc = ((double)image->columns-1.0)/2.0 + image->page.x;
yc = ((double)image->rows-1.0)/2.0 + image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n",
coeff[8]-0.5, coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[4],coeff[5],coeff[6],coeff[7]);
(void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/* A user-provided 'scale' expert option will scale the
output image size, by the factor given allowing for super-sampling
of the distorted image space. Any scaling factors must naturally
be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s", "-set option:distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
return((Image *) NULL);
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse)
{
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace,exception);
if (distort_image->background_color.alpha_trait != UndefinedPixelTrait)
distort_image->alpha_trait=BlendPixelTrait;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetPixelInfo(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
PixelInfo
pixel, /* pixel color to assign to distorted image */
invalid; /* the color to assign when distort result is invalid */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register ssize_t
i;
register Quantum
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'alpha_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
ConformPixelInfo(distort_image,&distort_image->alpha_color,&invalid,
exception);
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivatives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivatives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunately requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-aliasing */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesain to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* 2D Polar to Cartesian */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are useless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of an infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5;
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 0
/*if ( i == 0 && j == 0 )*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distortion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepard's Method, or Inverse Weighted Distance, for
displacement around the destination image control points.
The input arguments are the coefficients to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
Note: We can not determine derivatives using Shepard's method,
so only a point sample interpolation can be used.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x; /* make it as relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelViaPixelInfo(distort_image,&invalid,q);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel,
exception);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelViaPixelInfo(distort_image,&pixel,q);
}
q+=GetPixelChannels(distort_image);
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DistortImage)
#endif
proceed=SetImageProgress(image,DistortImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *distort_image,
    *rotate_image;

  double
    angle;

  PointInfo
    shear;

  size_t
    rotations;

  /*
    Sanity-check arguments, then normalize the rotation angle into the
    range (-45,45] plus a count of lossless 90-degree turns.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=degrees;
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    If the residual angle is negligible, the rotation is a pure multiple of
    90 degrees and can be done exactly (and cheaply) by IntegralRotateImage.
  */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception));
  /*
    General case: clone the image, sample off-canvas pixels from the
    background color, and rotate via a Scale-Rotate-Translate distortion
    using the caller's full angle.  (The argument was garbled to '°rees'
    by an encoding error; it must be the address of 'degrees'.)
  */
  distort_image=CloneImage(image,0,0,MagickTrue,exception);
  if (distort_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod,
    exception);
  rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  distort_image=DestroyImage(distort_image);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o method: the method to fill in the gradient between the control points.
%
% The methods used for SparseColor() are often similar to methods
% used for DistortImage(), and even share the same code for determination
% of the function coefficients, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const SparseColorMethod method,const size_t number_arguments,
  const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"

  SparseColorMethod
    sparse_method;

  double
    *coeff;

  Image
    *sparse_image;

  size_t
    number_colors;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Determine number of color values needed per control point: one per
    channel flagged for update, with black only in CMYK and alpha only
    when the image carries an alpha trait.
  */
  number_colors=0;
  if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
      (image->colorspace == CMYKColorspace))
    number_colors++;
  if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
      (image->alpha_trait != UndefinedPixelTrait))
    number_colors++;
  /*
    Convert input arguments into mapping coefficients; in this case
    we are mapping (distorting) colors, rather than coordinates.
  */
  { DistortMethod
      distort_method;

    /* SparseColorMethod values deliberately mirror DistortMethod values,
       so the same coefficient generator can be reused. */
    distort_method=(DistortMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
      arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some Distort Methods may fall back to other simpler methods,
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse_color method. This also ensures
      correct two and one color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method;   /* return non-distort methods to normal */
    if ( sparse_method == InverseColorInterpolate )
      coeff[0]=0.5;            /* sqrt() the squared distance for inverse */
  }
  /* Verbose output: print an equivalent -fx expression per channel when
     the method is simple enough to express that way. */
  if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }
  /* Generate new image for generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy, is if storage class could change then clone
   * the image.
   */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
    { /* if image is ColorMapped - change it to DirectClass */
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE ----- */
    CacheView
      *sparse_view;

    MagickBooleanType
      status;

    MagickOffsetType
      progress;

    ssize_t
      j;

    status=MagickTrue;
    progress=0;
    sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
    /* One thread per run of rows; 'status' latches any per-row failure and
       'progress' feeds the progress monitor. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sparse_image,sparse_image->rows,1)
#endif
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;    /* pixel to assign to distorted image */

      register ssize_t
        i;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          /* row fetch failed; record failure but keep other threads going */
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(sparse_image,&pixel);
      /* NOTE(review): loop bound uses image->columns while the row was
         fetched with sparse_image->columns -- identical here because
         sparse_image is a clone of image, but confirm if that invariant
         can ever break. */
      for (i=0; i < (ssize_t) image->columns; i++)
      {
        /* NOTE(review): q points into sparse_image's pixels although the
           traits argument passed is 'image' -- presumably both share the
           same channel layout; verify. */
        GetPixelInfoPixel(image,q,&pixel);
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            /* Affine plane fit: channel = a*i + b*j + c, three
               coefficients consumed per updated channel. */
            register ssize_t x=0;
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red     = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green   = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue    = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black   = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha   = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            /* Bilinear fit: channel = a*i + b*j + c*i*j + d, four
               coefficients consumed per updated channel. */
            register ssize_t x=0;
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red     = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green   = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue    = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black   = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha   = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance weights average (IDW) */
            size_t
              k;
            double
              denominator;

            /* Zero only the channels we will accumulate into. */
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red=0.0;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green=0.0;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue=0.0;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black=0.0;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha=0.0;
            denominator = 0.0;
            /* Each control point contributes its colors weighted by the
               inverse of its (power-adjusted) squared distance; weights
               within distance 1 are clipped to 1 to avoid blow-up. */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              register ssize_t x=(ssize_t) k+2;
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              weight = pow(weight,coeff[0]); /* inverse of power factor */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                pixel.red += arguments[x++]*weight;
              if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                pixel.green += arguments[x++]*weight;
              if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                pixel.blue += arguments[x++]*weight;
              if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->colorspace == CMYKColorspace))
                pixel.black += arguments[x++]*weight;
              if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->alpha_trait != UndefinedPixelTrait))
                pixel.alpha += arguments[x++]*weight;
              denominator += weight;
            }
            /* Normalize the weighted sums into a weighted average. */
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red/=denominator;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green/=denominator;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue/=denominator;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black/=denominator;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha/=denominator;
            break;
          }
          case ManhattanColorInterpolate:
          {
            /* Nearest control point by Manhattan (L1) distance. */
            size_t
              k;
            double
              minimum = MagickMaximumValue;

            /*
              Just use the closest control point you can find!
            */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  fabs((double)i-arguments[ k ])
                + fabs((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
          case VoronoiColorInterpolate:
          default:
          {
            /* Nearest control point by squared Euclidean distance
               (Voronoi cells); also the fallback for unknown methods. */
            size_t
              k;
            double
              minimum = MagickMaximumValue;

            /*
              Just use the closest control point you can find!
            */
            for (k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /* set the color directly back into the source image;
           interpolated values are normalized, so scale to QuantumRange
           and clamp before storing. */
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          pixel.red=ClampPixel(QuantumRange*pixel.red);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          pixel.green=ClampPixel(QuantumRange*pixel.green);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          pixel.blue=ClampPixel(QuantumRange*pixel.blue);
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          pixel.black=ClampPixel(QuantumRange*pixel.black);
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          pixel.alpha=ClampPixel(QuantumRange*pixel.alpha);
        SetPixelViaPixelInfo(sparse_image,&pixel,q);
        q+=GetPixelChannels(sparse_image);
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          /* progress update is serialized across threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SparseColorImage)
#endif
          proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
|
cg.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - CG
This benchmark is an OpenMP C version of the NPB CG code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: M. Yarrow
C. Kuszmaul
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
/*
c---------------------------------------------------------------------
c Note: please observe that in the routine conj_grad three
c implementations of the sparse matrix-vector multiply have
c been supplied. The default matrix-vector multiply is not
c loop unrolled. The alternate implementations are unrolled
c to a depth of 2 and unrolled to a depth of 8. Please
c experiment with these to find the fastest for your particular
c architecture. If reporting timing results, any of these three may
c be used without penalty.
c---------------------------------------------------------------------
*/
#include "npb-C.h"
#include "npbparams.h"
#ifndef _BSIZE_
#define _BSIZE_ 256
#endif
#define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2)
#ifdef _OPENARC_
#if _BSIZE_ == 32
#pragma openarc #define _BSIZE_ 32
#elif _BSIZE_ == 64
#pragma openarc #define _BSIZE_ 64
#elif _BSIZE_ == 128
#pragma openarc #define _BSIZE_ 128
#elif _BSIZE_ == 256
#pragma openarc #define _BSIZE_ 256
#elif _BSIZE_ == 384
#pragma openarc #define _BSIZE_ 384
#endif
#pragma openarc #define NZ \NA*(\NONZER+1)*(\NONZER+1)+\NA*(\NONZER+2)
#endif
/* global variables */
/* common /partit_size/ */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /main_int_mem/ */
static int colidx[NZ+1]; /* colidx[1:NZ] */
static int rowstr[NA+1+1]; /* rowstr[1:NA+1] */
static int iv[2*NA+1+1]; /* iv[1:2*NA+1] */
static int arow[NZ+1]; /* arow[1:NZ] */
static int acol[NZ+1]; /* acol[1:NZ] */
/* common /main_flt_mem/ */
static float v[NA+1+1]; /* v[1:NA+1] */
static float aelt[NZ+1]; /* aelt[1:NZ] */
static float a[NZ+1]; /* a[1:NZ] */
static float x[NA+2+1]; /* x[1:NA+2] */
static float z[NA+2+1]; /* z[1:NA+2] */
static float p[NA+2+1]; /* p[1:NA+2] */
static float q[NA+2+1]; /* q[1:NA+2] */
static float r[NA+2+1]; /* r[1:NA+2] */
static float w[NA+2+1]; /* w[1:NA+2] */
/* common /urando/ */
static float amult;
static float tran;
// Static variables used in conj_grad().
static float d, sum, rho, rho0, alpha, beta;
/* function declarations */
static void conj_grad (int colidx[NZ+1], int rowstr[NA+1+1], float x[NA+2+1], float z[NA+2+1],
float a[NZ+1], float p[NA+2+1], float q[NA+2+1], float r[NA+2+1],
float w[NA+2+1], float *rnorm);
static void makea(int n, int nz, float a[NZ+1], int colidx[NZ+1], int rowstr[NA+1+1],
int nonzer, int firstrow, int lastrow, int firstcol,
int lastcol, float rcond, int arow[NZ+1], int acol[NZ+1],
float aelt[NZ+1], float v[NA+1+1], int iv[2*NA+1+1], float shift );
static void sparse(float a[NZ+1], int colidx[NZ+1], int rowstr[NA+1+1], int n,
int arow[NZ+1], int acol[NZ+1], float aelt[NZ+1],
int firstrow, int lastrow,
float x[NA+1+1], boolean mark[NA+1], int nzloc[NA+1], int nnza);
static void sprnvc(int n, int nz, float v[], int iv[], int nzloc[],
int mark[]);
static int icnvrt(float x, int ipwr2);
static void vecset(int n, float v[], int iv[], int *nzv, int i, float val);
/*--------------------------------------------------------------------
program cg
--------------------------------------------------------------------*/
int main(int argc, char *argv[]) {
int i_main, j_main, k_main, it;
int nthreads = 1;
float zeta;
float rnorm;
float norm_temp11;
float norm_temp12;
float t, mflops;
char classT = 'U';
boolean verified;
float zeta_verify_value, epsilon;
////////////////////////////////////
// Used for inlining conj_grad(). //
////////////////////////////////////
int i, j, k;
int cgit, cgitmax = 25;
firstrow = 1;
lastrow = NA;
firstcol = 1;
lastcol = NA;
#pragma aspen control ignore
if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) {
classT = 'S';
// zeta_verify_value = 8.5971775078648;
zeta_verify_value = 8.379274368286; //serial version value with Single Precision
} else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) {
classT = 'W';
// zeta_verify_value = 10.362595087124;
zeta_verify_value = 10.11725139618; //serial version value with Single Precision
} else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) {
classT = 'A';
// zeta_verify_value = 17.130235054029;
zeta_verify_value = 18.62915039062; //serial version value with Single Precision
} else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) {
classT = 'B';
// zeta_verify_value = 22.712745482631;
zeta_verify_value = 62.42129135132; //serial version value with Single Precision
} else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) {
classT = 'C';
// zeta_verify_value = 28.973605592845;
zeta_verify_value = 115.1209869385; //serial version value with Single Precision
} else {
classT = 'U';
}
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
" - CG Benchmark\n");
printf(" Size: %10d\n", NA);
printf(" Iterations: %5d\n", NITER);
naa = NA;
nzz = NZ;
timer_clear(2);
timer_clear(3);
timer_clear(4);
timer_start(2);
/*--------------------------------------------------------------------
c Initialize random number generator
c-------------------------------------------------------------------*/
// Initial numbers are changed for single precision
// tran = 314159265.0;
// amult = 1220703125.0;
tran = 28183.0f;
amult = 390625.0f;
zeta = randlc( &tran, amult );
/*--------------------------------------------------------------------
c
c-------------------------------------------------------------------*/
timer_start(4);
#pragma aspen control ignore
makea(naa, nzz, a, colidx, rowstr, NONZER,
firstrow, lastrow, firstcol, lastcol,
RCOND, arow, acol, aelt, v, iv, SHIFT);
timer_stop(4);
timer_start(3);
/*---------------------------------------------------------------------
c Note: as a result of the above call to makea:
c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1
c values of colidx which are col indexes go from firstcol --> lastcol
c So:
c Shift the col index vals from actual (firstcol --> lastcol )
c to local, i.e., (1 --> lastcol-firstcol+1)
c---------------------------------------------------------------------*/
#pragma acc data \
create(x[0:NA+3]) \
create(z[0:NA+3]) \
create(p[0:NA+3]) \
create(q[0:NA+3]) \
create(r[0:NA+3]) \
create(w[0:NA+3]) \
copyin(a[0:NZ+1]) \
copyin(colidx[0:NZ+1]) \
copyin(rowstr[0:NA+2])
{
timer_stop(3);
// R/O Shared scalar: lastrow, firstrow, firstcol
// R/O Shared arrays: rowstr[NA+1+1]
// R/W Shared arrays: colidx[NZ+1]
// R/W Private scalar: j_main, k_main
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastrow - firstrow + 1; j_main++) {
#pragma aspen declare param(aspen_nonzerosperrow:(NONZER+1)*(NONZER+1)+NONZER+2)
#pragma aspen control loop(aspen_nonzerosperrow)
for (k_main = rowstr[j_main]; k_main < rowstr[j_main+1]; k_main++) {
colidx[k_main] = colidx[k_main] - firstcol + 1;
}
}
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalar: i_main
#pragma acc kernels loop gang worker
for (i_main = 1; i_main <= NA+1; i_main++) {
x[i_main] = 1.0f;
}
// R/W Shared scalar: zeta
zeta = 0.0f;
/*-------------------------------------------------------------------
c---->
c Do one iteration untimed to init all code and data page tables
c----> (then reinit, start timing, to niter its)
c-------------------------------------------------------------------*/
for (it = 1; it <= 1; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
//conj_grad (colidx, rowstr, x, z, a, p, q, r, w, &rnorm);
cgitmax = 25;
// R/W Shared scalars: rho (function-static)
rho = 0.0f;
/*--------------------------------------------------------------------
c Initialize the CG algorithm:
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1], r[NA+2+1]
// R/W Shared arrays: q[NA+2+1], z[NA+2+1], r[NA+2+1], p[NA+2+1], w[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= NA+1; j++) {
q[j] = 0.0f;
z[j] = 0.0f;
r[j] = x[j];
p[j] = r[j];
w[j] = 0.0f;
}
/*--------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + x[j]*x[j];
}
/*--------------------------------------------------------------------
c---->
c The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
for (cgit = 1; cgit <= cgitmax; cgit++) {
// R/W Shared scalars: d, rho, rho0 (function-static)
{
rho0 = rho;
d = 0.0f;
rho = 0.0f;
} /* end single */
/*--------------------------------------------------------------------
c q = A.p
c The partition submatrix-vector multiply: use workspace w
c---------------------------------------------------------------------
C
C NOTE: this version of the multiply is actually (slightly: maybe %5)
C faster on the sp2 on 16 nodes than is the unrolled-by-2 version
C below. On the Cray t3d, the reverse is true, i.e., the
C unrolled-by-two version is some 10% faster.
C The unrolled-by-8 version below is significantly faster
C on the Cray t3d - overall speed of code is 1.5 times faster.
*/
/* rolled version */
// R/O Shared scalars: lastrow, firstrow
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], p[NA+2+1], colidx[NZ+1],
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j, k, sum
#pragma acc kernels loop gang worker independent private(sum)
for (j = 1; j <= lastrow-firstrow+1; j++) {
sum = 0.0f;
#pragma aspen control loop(aspen_nonzerosperrow)
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
sum = sum + a[k]*p[colidx[k]];
}
w[j] = sum;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: q[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = w[j];
}
/*--------------------------------------------------------------------
c Clear w for reuse...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j
/*--------------------------------------------------------------------
c Obtain p.q
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared scalars: d (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
w[j] = 0.0f;
d = d + p[j]*q[j];
}
/*--------------------------------------------------------------------
c Obtain alpha = rho / (p.q)
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, d (function-static)
// R/W Shared scalars: alpha (function-static)
alpha = rho0 / d;
/*--------------------------------------------------------------------
c Save a temporary of rho
c-------------------------------------------------------------------*/
/* rho0 = rho;*/
/*---------------------------------------------------------------------
c Obtain z = z + alpha*p
c and r = r - alpha*q
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: alpha (function-static)
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared arrays: z[NA+2+1], r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
z[j] = z[j] + alpha*p[j];
r[j] = r[j] - alpha*q[j];
}
/*---------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + r[j]*r[j];
}
/*--------------------------------------------------------------------
c Obtain beta:
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, rho (function-static)
// R/W Shared scalars: beta (function-static)
beta = rho / rho0;
/*--------------------------------------------------------------------
c p = r + beta*p
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: beta (function-static)
// R/O Shared arrays: r[NA+2+1]
// R/W Shared arrays: p[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
p[j] = r[j] + beta*p[j];
}
} /* end of do cgit=1,cgitmax */
/*---------------------------------------------------------------------
c Compute residual norm explicitly: ||r|| = ||x - A.z||
c First, form A.z
c The partition submatrix-vector multiply
c---------------------------------------------------------------------*/
// R/W Shared scalars: sum (function-static)
sum = 0.0f;
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], colidx[NZ+1], z[NA+2+1]
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j,d,k
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastrow-firstrow+1; j++) {
d = 0.0f;
#pragma aspen control loop(aspen_nonzerosperrow)
for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {
d = d + a[k]*z[colidx[k]];
}
w[j] = d;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
r[j] = w[j];
}
/*--------------------------------------------------------------------
c At this point, r contains A.z
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1], x[NA+2+1]
// R/W Shared scalars: d, sum (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastcol-firstcol+1; j++) {
d = x[j] - r[j];
sum = sum + d*d;
}
// R/O Shared scalars: sum (function-static)
// R/W Shared scalars: rnorm
{
//(*rnorm) = sqrtf(sum);
rnorm = sqrtf(sum);
} /* end single */
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
// R/W Shared scalars: norm_temp11, norm_temp12
{
norm_temp11 = 0.0f;
norm_temp12 = 0.0f;
} /* end single */
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1], z[NA+2+1]
// R/W Shared scalars: norm_temp11, norm_temp12
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
norm_temp11 = norm_temp11 + x[j_main]*z[j_main];
norm_temp12 = norm_temp12 + z[j_main]*z[j_main];
}
// R/w Shared scalars: norm_temp12
norm_temp12 = 1.0f / sqrtf( norm_temp12 );
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol, norm_temp12
// R/O Shared arrays: z[NA+2+1]
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
x[j_main] = norm_temp12*z[j_main];
}
} /* end of do one iteration untimed */
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalars: i_main
#pragma acc kernels loop gang worker
for (i_main = 1; i_main <= NA+1; i_main++) {
x[i_main] = 1.0f;
}
// R/W Shared scalars: zeta
zeta = 0.0f;
// } /* end parallel */
timer_clear( 1 );
timer_start( 1 );
/*--------------------------------------------------------------------
c---->
c Main Iteration for inverse power method
c---->
c-------------------------------------------------------------------*/
//#pragma omp parallel private(it,i_main,j_main,k_main)
// {
for (it = 1; it <= NITER; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
//conj_grad(colidx, rowstr, x, z, a, p, q, r, w, &rnorm);
cgitmax = 25;
// R/W Shared scalars: rho (function-static)
rho = 0.0f;
/*--------------------------------------------------------------------
c Initialize the CG algorithm:
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1], r[NA+2+1]
// R/W Shared arrays: q[NA+2+1], z[NA+2+1], r[NA+2+1], p[NA+2+1], w[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= NA+1; j++) {
q[j] = 0.0f;
z[j] = 0.0f;
r[j] = x[j];
p[j] = r[j];
w[j] = 0.0f;
}
/*--------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + x[j]*x[j];
}
/*--------------------------------------------------------------------
c---->
c The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
for (cgit = 1; cgit <= cgitmax; cgit++) {
// R/W Shared scalars: d, rho, rho0 (function-static)
{
rho0 = rho;
d = 0.0f;
rho = 0.0f;
} /* end single */
/*--------------------------------------------------------------------
c q = A.p
c The partition submatrix-vector multiply: use workspace w
c---------------------------------------------------------------------
C
C NOTE: this version of the multiply is actually (slightly: maybe %5)
C faster on the sp2 on 16 nodes than is the unrolled-by-2 version
C below. On the Cray t3d, the reverse is true, i.e., the
C unrolled-by-two version is some 10% faster.
C The unrolled-by-8 version below is significantly faster
C on the Cray t3d - overall speed of code is 1.5 times faster.
*/
/* rolled version */
// R/O Shared scalars: lastrow, firstrow
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], p[NA+2+1], colidx[NZ+1],
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j, k, sum
#pragma acc kernels loop gang worker independent private(sum)
for (j = 1; j <= lastrow-firstrow+1; j++) {
sum = 0.0f;
#pragma aspen control loop(aspen_nonzerosperrow)
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
sum = sum + a[k]*p[colidx[k]];
}
w[j] = sum;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: q[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = w[j];
}
/*--------------------------------------------------------------------
c Clear w for reuse...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j
/*--------------------------------------------------------------------
c Obtain p.q
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared scalars: d (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
w[j] = 0.0f;
d = d + p[j]*q[j];
}
/*--------------------------------------------------------------------
c Obtain alpha = rho / (p.q)
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, d (function-static)
// R/W Shared scalars: alpha (function-static)
alpha = rho0 / d;
/*--------------------------------------------------------------------
c Save a temporary of rho
c-------------------------------------------------------------------*/
/* rho0 = rho;*/
/*---------------------------------------------------------------------
c Obtain z = z + alpha*p
c and r = r - alpha*q
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: alpha (function-static)
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared arrays: z[NA+2+1], r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
z[j] = z[j] + alpha*p[j];
r[j] = r[j] - alpha*q[j];
}
/*---------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + r[j]*r[j];
}
/*--------------------------------------------------------------------
c Obtain beta:
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, rho (function-static)
// R/W Shared scalars: beta (function-static)
beta = rho / rho0;
/*--------------------------------------------------------------------
c p = r + beta*p
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: beta (function-static)
// R/O Shared arrays: r[NA+2+1]
// R/W Shared arrays: p[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
p[j] = r[j] + beta*p[j];
}
} /* end of do cgit=1,cgitmax */
/*---------------------------------------------------------------------
c Compute residual norm explicitly: ||r|| = ||x - A.z||
c First, form A.z
c The partition submatrix-vector multiply
c---------------------------------------------------------------------*/
// R/W Shared scalars: sum (function-static)
sum = 0.0f;
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], colidx[NZ+1], z[NA+2+1]
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j,d,k
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastrow-firstrow+1; j++) {
d = 0.0f;
#pragma aspen control loop(aspen_nonzerosperrow)
for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {
d = d + a[k]*z[colidx[k]];
}
w[j] = d;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
r[j] = w[j];
}
/*--------------------------------------------------------------------
c At this point, r contains A.z
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1], x[NA+2+1]
// R/W Shared scalars: d, sum (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastcol-firstcol+1; j++) {
d = x[j] - r[j];
sum = sum + d*d;
}
// R/O Shared scalars: sum (function-static)
// R/W Shared scalars: rnorm
{
//(*rnorm) = sqrtf(sum);
rnorm = sqrtf(sum);
} /* end single */
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
// R/W Shared scalars: norm_temp11, norm_temp12
{
norm_temp11 = 0.0f;
norm_temp12 = 0.0f;
} /* end single */
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1], z[NA+2+1]
// R/W Shared scalars: norm_temp11, norm_temp12
// R/W Private scalars: j_main
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
norm_temp11 = norm_temp11 + x[j_main]*z[j_main];
norm_temp12 = norm_temp12 + z[j_main]*z[j_main];
}
// R/O Shared scalars: norm_temp11
// R/W Shared scalars: norm_temp12, zeta
{
norm_temp12 = 1.0f / sqrtf( norm_temp12 );
zeta = SHIFT + 1.0f / norm_temp11;
} /* end single */
{
if( it == 1 ) {
printf(" iteration ||r|| zeta\n");
}
printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta);
} /* end master */
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol, norm_temp12
// R/O Shared arrays: z[NA+2+1]
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
x[j_main] = norm_temp12*z[j_main];
}
} /* end of main iter inv pow meth */
#if defined(_OPENMP)
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */
timer_stop( 1 );
timer_stop( 2 );
/*--------------------------------------------------------------------
c End of timed section
c-------------------------------------------------------------------*/
t = timer_read( 1 );
printf(" Benchmark completed\n");
//epsilon = 1.0e-10;
//New value for single precision
epsilon = 1.0e-6;
#pragma aspen control ignore
if (classT != 'U') {
if (fabs(zeta - zeta_verify_value) <= epsilon) {
verified = TRUE;
printf(" VERIFICATION SUCCESSFUL\n");
printf(" Zeta is %20.12e\n", zeta);
printf(" Error is %20.12e\n", zeta - zeta_verify_value);
} else {
verified = FALSE;
printf(" VERIFICATION FAILED\n");
printf(" Zeta %20.12e\n", zeta);
printf(" The correct zeta is %20.12e\n", zeta_verify_value);
}
} else {
verified = FALSE;
printf(" Problem size unknown\n");
printf(" NO VERIFICATION PERFORMED\n");
}
#pragma aspen control probability(1)
if ( t != 0.0 ) {
mflops = (2.0*NITER*NA)
* (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 )
/ t / 1000000.0;
} else {
mflops = 0.0;
}
c_print_results("CG", classT, NA, 0, 0, NITER, nthreads, t,
mflops, " floating point",
verified, NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
printf("makea() execution time = %12.4f\n", timer_read(4));
printf("CUDA Initialization time = %12.4f\n", timer_read(3));
printf("Total execution time = %12.4f\n", timer_read(2));
return 0;
}
/*---------------------------------------------------------------------
c generate the test problem for benchmark 6
c makea generates a sparse matrix with a
c prescribed sparsity distribution
c
c parameter type usage
c
c input
c
c n i number of cols/rows of matrix
c nz i nonzeros as declared array size
c rcond r*8 condition number
c shift r*8 main diagonal shift
c
c output
c
c a r*8 array for nonzeros
c colidx i col indices
c rowstr i row pointers
c
c workspace
c
c iv, arow, acol i
c v, aelt r*8
c---------------------------------------------------------------------*/
static void makea(
    int n,                  /* number of cols/rows of matrix */
    int nz,                 /* declared capacity of the nonzero arrays */
    float a[NZ+1],          /* a[1:nz]      output: nonzero values */
    int colidx[NZ+1],       /* colidx[1:nz] output: column indices; the upper
                               half colidx[n+1..2n] doubles as sprnvc's mark
                               array (see the init loop below) */
    int rowstr[NA+1+1],     /* rowstr[1:n+1] output: row pointers */
    int nonzer,             /* requested nonzeros per random sparse vector */
    int firstrow,
    int lastrow,            /* rows kept by this partition */
    int firstcol,
    int lastcol,            /* columns kept by this partition */
    float rcond,            /* condition number; bounds the smallest eigenvalue */
    int arow[NZ+1],         /* arow[1:nz]  workspace: triple row indices */
    int acol[NZ+1],         /* acol[1:nz]  workspace: triple column indices */
    float aelt[NZ+1],       /* aelt[1:nz]  workspace: triple values */
    float v[NA+1+1],        /* v[1:n+1]    workspace for sprnvc/vecset */
    int iv[2*NA+1+1],       /* iv[1:2*n+1] workspace (both halves passed to sparse) */
    float shift )           /* main diagonal shift */
{
    int i, nnza, iouter, ivelt, ivelt1, irow, nzv;
    /*--------------------------------------------------------------------
    c nonzer is approximately (int(sqrt(nnza /n)));
    c-------------------------------------------------------------------*/
    float size, ratio, scale;
    int jcol;

    /* size decays geometrically by ratio = rcond^(1/n) each outer
       iteration, so after n iterations it reaches rcond */
    size = 1.0f;
    ratio = pow(rcond, (1.0f / (float)n));
    nnza = 0;

    /*---------------------------------------------------------------------
    c Initialize colidx(n+1 .. 2n) to zero.
    c Used by sprnvc to mark nonzero positions
    c---------------------------------------------------------------------*/
#pragma acc kernels loop gang worker copyout(colidx)
    for (i = 1; i <= n; i++) {
        colidx[n+i] = 0;
    }

    /* Generate n random sparse vectors; accumulate the weighted outer
       product of each vector with itself as (row, col, value) triples,
       keeping only entries inside this partition's row/column range.
       The sequential randlc stream inside sprnvc makes this loop order
       significant — do not reorder. */
    for (iouter = 1; iouter <= n; iouter++) {
        nzv = nonzer;
        sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n]));
        vecset(n, v, iv, &nzv, iouter, 0.5);    /* force element iouter to 0.5 */
        for (ivelt = 1; ivelt <= nzv; ivelt++) {
            jcol = iv[ivelt];
            if (jcol >= firstcol && jcol <= lastcol) {
                scale = size * v[ivelt];
                for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) {
                    irow = iv[ivelt1];
                    if (irow >= firstrow && irow <= lastrow) {
                        nnza = nnza + 1;
                        if (nnza > nz) {
                            printf("Space for matrix elements exceeded in"
                                   " makea\n");
                            printf("nnza, nzmax = %d, %d\n", nnza, nz);
                            printf("iouter = %d\n", iouter);
                            exit(1);
                        }
                        acol[nnza] = jcol;
                        arow[nnza] = irow;
                        aelt[nnza] = v[ivelt1] * scale;
                    }
                }
            }
        }
        size = size * ratio;
    }

    /*---------------------------------------------------------------------
    c ... add the identity * rcond to the generated matrix to bound
    c     the smallest eigenvalue from below by rcond
    c---------------------------------------------------------------------*/
    for (i = firstrow; i <= lastrow; i++) {
        if (i >= firstcol && i <= lastcol) {
            iouter = n + i;
            nnza = nnza + 1;
            if (nnza > nz) {
                printf("Space for matrix elements exceeded in makea\n");
                printf("nnza, nzmax = %d, %d\n", nnza, nz);
                printf("iouter = %d\n", iouter);
                exit(1);
            }
            acol[nnza] = i;
            arow[nnza] = i;
            aelt[nnza] = rcond - shift;   /* diagonal entry */
        }
    }

    /*---------------------------------------------------------------------
    c ... make the sparse matrix from list of elements with duplicates
    c     (v and iv are used as workspace)
    c---------------------------------------------------------------------*/
    sparse(a, colidx, rowstr, n, arow, acol, aelt,
           firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza);
}
/*---------------------------------------------------
c generate a sparse matrix from a list of
c [col, row, element] tri
c---------------------------------------------------*/
static void sparse(
    float a[NZ+1],          /* a[1:*]      output: nonzero values, duplicates summed */
    int colidx[NZ+1],       /* colidx[1:*] output: column index of each nonzero */
    int rowstr[NA+1+1],     /* rowstr[1:*] output: start of each row in a/colidx */
    int n,                  /* matrix order (number of columns) */
    int arow[NZ+1],         /* arow[1:*]   input triples: row indices */
    int acol[NZ+1],         /* acol[1:*]   input triples: column indices */
    float aelt[NZ+1],       /* aelt[1:*]   input triples: values */
    int firstrow,
    int lastrow,
    float x[NA+1+1],        /* x[1:n]      workspace: per-column accumulator */
    boolean mark[NA+1],     /* mark[1:n]   workspace: column-touched flags */
    int nzloc[NA+1],        /* nzloc[1:n]  workspace: list of touched columns */
    int nnza)               /* number of input triples */
/*---------------------------------------------------------------------
c rows range from firstrow to lastrow
c the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
    int nrows;
    int i, j, jajp1, nza, k, nzrow;
    float xi;

    /* how many rows of result */
    nrows = lastrow - firstrow + 1;

    /* ...count the number of triples in each row.
       Note the extra +1 in the index: row r's count is accumulated into
       rowstr[r+1], so the prefix sum below leaves each row's start
       offset directly in rowstr[r]. */
#pragma acc kernels loop gang worker \
copyout(rowstr[0:NA+1+1]) create(mark[0:NA+1])
    for (j = 1; j <= n; j++) {
        rowstr[j] = 0;
        mark[j] = FALSE;
    }
    rowstr[n+1] = 0;
    for (nza = 1; nza <= nnza; nza++) {
        j = (arow[nza] - firstrow + 1) + 1;
        rowstr[j] = rowstr[j] + 1;
    }
    /* prefix sum: rowstr[j] becomes the 1-based start offset of row j */
    rowstr[1] = 1;
    for (j = 2; j <= nrows+1; j++) {
        rowstr[j] = rowstr[j] + rowstr[j-1];
    }

    /* ... do a bucket sort of the triples on the row index. rowstr[j] is
       used as the insertion cursor for row j and incremented per element,
       so after this loop rowstr[j] points to the start of row j+1. */
    for (nza = 1; nza <= nnza; nza++) {
        j = arow[nza] - firstrow + 1;
        k = rowstr[j];
        a[k] = aelt[nza];
        colidx[k] = acol[nza];
        rowstr[j] = rowstr[j] + 1;
    }

    /* ... shift the pointers down one slot to undo the cursor advance
       above and restore rowstr[j] = start of row j */
    for (j = nrows; j >= 1; j--) {
        rowstr[j+1] = rowstr[j];
    }
    rowstr[1] = 1;

    /* ... generate the actual output rows by adding elements:
       duplicate (row, col) entries are summed into x[col]; mark/nzloc
       record which columns were touched so only those slots of the
       workspace need resetting per row */
    nza = 0;
#pragma acc kernels loop gang worker copyout(x, mark)
    for (i = 1; i <= n; i++) {
        x[i] = 0.0f;
        mark[i] = FALSE;
    }
    jajp1 = rowstr[1];
    for (j = 1; j <= nrows; j++) {
        nzrow = 0;
        /* ...loop over the jth row of a, accumulating duplicates into x */
        for (k = jajp1; k < rowstr[j+1]; k++) {
            i = colidx[k];
            x[i] = x[i] + a[k];
            if ( mark[i] == FALSE && x[i] != 0.0f) {
                mark[i] = TRUE;
                nzrow = nzrow + 1;
                nzloc[nzrow] = i;
            }
        }
        /* ... extract the nonzeros of this row, compacting them into
           a/colidx starting at position nza+1, and clear the workspace
           slots that were used */
        for (k = 1; k <= nzrow; k++) {
            i = nzloc[k];
            mark[i] = FALSE;
            xi = x[i];
            x[i] = 0.0f;
            if (xi != 0.0f) {     /* drop entries that cancelled to zero */
                nza = nza + 1;
                a[nza] = xi;
                colidx[nza] = i;
            }
        }
        /* advance to the next row and rewrite its start pointer to the
           compacted position */
        jajp1 = rowstr[j+1];
        rowstr[j+1] = nza + rowstr[1];
    }
}
/*---------------------------------------------------------------------
c generate a sparse n-vector (v, iv)
c having nzv nonzeros
c
c mark(i) is set to 1 if position i is nonzero.
c mark is all zero on entry and is reset to all zero before exit
c this corrects a performance bug found by John G. Lewis, caused by
c reinitialization of mark on every one of the n calls to sprnvc
---------------------------------------------------------------------*/
static void sprnvc(
    int n,          /* vector length */
    int nz,         /* number of nonzeros to generate */
    float v[],      /* v[1:*]  out: nonzero values */
    int iv[],       /* iv[1:*] out: nonzero positions */
    int nzloc[],    /* nzloc[1:n] workspace: positions marked this call */
    int mark[] )    /* mark[1:n]  all-zero on entry; reset to all-zero on exit */
{
    /* Smallest power of two not less than n; the do-while in the
       reference always doubles at least once, hence the start at 2. */
    int pow2 = 2;
    while (pow2 < n) {
        pow2 *= 2;
    }

    int count = 0;   /* nonzeros stored so far */
    int marked = 0;  /* entries recorded in nzloc, to be unmarked on exit */

    while (count < nz) {
        /* The value is drawn before the position so the random-stream
           consumption matches the reference code exactly. */
        float value = randlc(&tran, amult);
        float where = randlc(&tran, amult);

        /* Map (0,1) to an integer in [1, pow2]; reject anything past n. */
        int idx = icnvrt(where, pow2) + 1;
        if (idx > n) {
            continue;
        }

        /* Skip positions generated earlier in this call. */
        if (mark[idx] != 0) {
            continue;
        }
        mark[idx] = 1;
        marked++;
        nzloc[marked] = idx;

        count++;
        v[count] = value;
        iv[count] = idx;
    }

    /* Reset only the marks set above, leaving mark all-zero for the
       next call (this avoids an O(n) clear per invocation). */
    for (int ii = 1; ii <= marked; ii++) {
        mark[nzloc[ii]] = 0;
    }
}
/*---------------------------------------------------------------------
* scale a float precision number x in (0,1) by a power of 2 and chop it
*---------------------------------------------------------------------*/
static int icnvrt(float x, int ipwr2) {
    /* ipwr2 is promoted to float for the multiply; the result is
       truncated toward zero, exactly as the original cast did. */
    float scaled = (float) ipwr2 * x;
    return (int) scaled;
}
/*--------------------------------------------------------------------
c set ith element of sparse vector (v, iv) with
c nzv nonzeros to val
c-------------------------------------------------------------------*/
static void vecset(
    int n,          /* vector length (unused; kept for interface parity) */
    float v[],      /* v[1:*]  sparse vector values */
    int iv[],       /* iv[1:*] sparse vector positions */
    int *nzv,       /* in/out: current number of nonzeros */
    int i,          /* position whose value is to be set */
    float val)      /* value to store at position i */
{
    /* Overwrite every stored entry whose position equals i (the scan
       deliberately continues past the first hit, as the reference does);
       if no such entry exists, append (i, val) and grow the count. */
    int found = 0;
    for (int k = 1; k <= *nzv; k++) {
        if (iv[k] == i) {
            v[k] = val;
            found = 1;
        }
    }
    if (!found) {
        *nzv += 1;
        v[*nzv] = val;
        iv[*nzv] = i;
    }
}
|
matrice-mpi.c |
/**
* Matrix product with MPI/OpenMP
*
* <p>
* This execution is a hybrid parallel execution
* </p>
*
* @date 19/09/2020
* @author Jerome Dh
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>
#include <omp.h>
#include "matrice-mpi.h"
/**
* Fill matrix with the random values
*
* @param V - Matrix
* @param M - x lenght
* @param N - y lenght
*/
void matrix_fill(float V[][T2], int M, int N) {
    int row, col;
    for (row = 0; row < M; row++) {
        for (col = 0; col < N; col++) {
            /* Pseudo-random value in {1, 2} (rand() % 2 yields 0 or 1). */
            V[row][col] = (rand() % 2) + 1;
        }
    }
}
/**
* Print matrix
*/
void matrix_print(float V[][T2], int M, int N) {
    int row, col;
    /* Entries are emitted comma-separated on one line; a single newline
       follows the whole matrix (same output as the reference). */
    for (row = 0; row < M; row++) {
        for (col = 0; col < N; col++) {
            printf("%.2lf, ", V[row][col]);
        }
    }
    printf("\n");
}
/**
 * Hybrid MPI/OpenMP matrix product: rank 0 fills A and B and broadcasts
 * them point-to-point; every rank computes its own slice of rows of
 * C = A*B in an OpenMP-parallel loop, logging each element to
 * RESULT_FILENAME; rank 0 writes the elapsed CPU time to OUT_FILENAME.
 *
 * Returns EXIT_SUCCESS; exits with EXIT_FAILURE if a file cannot be opened.
 */
int main(int argc, char** argv) {
    srand(time(NULL));

    int rank, size, TAG = 200;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    float A[T1][T2];
    float B[T1][T2];
    float C[T1][T2];

    FILE *file_result = fopen(RESULT_FILENAME, "a");
    if(file_result == NULL){
        exit(EXIT_FAILURE);
    }

    // Processor clock time
    clock_t t1 = clock();

    printf("Processus MPI %d/%d..\n", rank+1, size);

    // Init all variables and then send to all others processes
    if(rank == 0)
    {
        // Fill the matrixes
        matrix_fill(A, T1, T2);
        matrix_fill(B, T1, T2);

        // Send to all others process
        for(int j = 1; j < size; j++)
        {
            MPI_Send(A, T1 * T2, MPI_FLOAT, j, TAG, MPI_COMM_WORLD);
            MPI_Send(B, T1 * T2, MPI_FLOAT, j, TAG + 1, MPI_COMM_WORLD);
        }
    }
    else
    {
        // Receive from master
        MPI_Recv(A, T1 * T2, MPI_FLOAT, 0, TAG, MPI_COMM_WORLD, &status);
        MPI_Recv(B, T1 * T2, MPI_FLOAT, 0, TAG + 1, MPI_COMM_WORLD, &status);
    }

    // Product of A * B over this rank's slice of rows.
    // BUG FIX: 'j' was declared outside the parallel region without a
    // private clause, so all threads shared one loop counter — a data
    // race. 'i' is the parallel loop variable and is private implicitly;
    // 'j' must be made private explicitly.
    int i, j;
    #pragma omp parallel for private(j)
    for(i = rank * T1/size; i < (rank+1) * T1/size; i++)
    {
        for(j = 0; j<T2; j++) {
            C[i][j] = 0;
            for(int k=0; k<T2; k++) {
                C[i][j] = C[i][j] + (A[i][k] * B[k][j]);
            }
            // NOTE(review): concurrent fprintf to one stream is safe per
            // POSIX (stdio locking) but line order is nondeterministic.
            fprintf(file_result, "C[%d,%d]=%.2f\n", i, j, C[i][j]);
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);

    // BUG FIX: the result file was never closed, so buffered output could
    // be lost; close (and thereby flush) it before finalizing.
    fclose(file_result);

    // End of computing
    if(rank == 0)
    {
        clock_t t2 = clock();
        double diff_clock = ((double)t2 - (double)t1) / CLOCKS_PER_SEC;
        printf("Fin de calcul !\nTemps Processeur ecoule: %lf sec\n", diff_clock);

        // Write in file
        FILE *file_out = fopen(OUT_FILENAME, "w");
        // BUG FIX: the original re-tested file_result here (copy-paste
        // error), so a failed fopen of file_out went undetected and the
        // fprintf below would dereference NULL.
        if(file_out == NULL){
            exit(EXIT_FAILURE);
        }
        fprintf(file_out, "%.5lf", diff_clock);
        fclose(file_out);
    }

    MPI_Finalize();
    return EXIT_SUCCESS;
}
GB_binop__lxor_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lxor_uint64
// A.*B function (eWiseMult): GB_AemultB__lxor_uint64
// A*D function (colscale): GB_AxD__lxor_uint64
// D*A function (rowscale): GB_DxB__lxor_uint64
// C+=B function (dense accum): GB_Cdense_accumB__lxor_uint64
// C+=b function (dense accum): GB_Cdense_accumb__lxor_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lxor_uint64
// C=scalar+B GB_bind1st__lxor_uint64
// C=scalar+B' GB_bind1st_tran__lxor_uint64
// C=A+scalar GB_bind2nd__lxor_uint64
// C=A'+scalar GB_bind2nd_tran__lxor_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) != (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT64 || GxB_NO_LXOR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__lxor_uint64      // C = A+B, all dense, no accum
(
    GrB_Matrix C,           // output matrix (dense)
    const GrB_Matrix A,     // first input (dense)
    const GrB_Matrix B,     // second input (dense)
    const int nthreads      // # of threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    // the loop body uses the GB_* macros defined at the top of this file
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__lxor_uint64              // C += B, C dense, B sparse
(
    GrB_Matrix C,                                   // in/out: dense matrix accumulated into
    const GrB_Matrix B,                             // input matrix folded into C
    const int64_t *GB_RESTRICT kfirst_slice,        // first vector of B handled by each task
    const int64_t *GB_RESTRICT klast_slice,         // last vector of B handled by each task
    const int64_t *GB_RESTRICT pstart_slice,        // entry offset where each task starts
    const int ntasks,                               // # of parallel tasks
    const int nthreads                              // # of threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__lxor_uint64              // C += b, C dense, b scalar
(
    GrB_Matrix C,                                   // in/out: dense matrix
    const GB_void *p_bwork,                         // points to the scalar b (uint64_t)
    const int nthreads                              // # of threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above always returns); emitted by the code generator
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__lxor_uint64                        // C = A*D, column scale by diagonal D
(
    GrB_Matrix C,                                   // output matrix
    const GrB_Matrix A, bool A_is_pattern,          // input; pattern-only if flag set
    const GrB_Matrix D, bool D_is_pattern,          // diagonal scaling matrix
    const int64_t *GB_RESTRICT kfirst_slice,        // first vector handled by each task
    const int64_t *GB_RESTRICT klast_slice,         // last vector handled by each task
    const int64_t *GB_RESTRICT pstart_slice,        // entry offset where each task starts
    const int ntasks,                               // # of parallel tasks
    const int nthreads                              // # of threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    // expose C's values with the concrete type so the template can write them
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__lxor_uint64                        // C = D*B, row scale by diagonal D
(
    GrB_Matrix C,                                   // output matrix
    const GrB_Matrix D, bool D_is_pattern,          // diagonal scaling matrix
    const GrB_Matrix B, bool B_is_pattern,          // input; pattern-only if flag set
    int nthreads                                    // # of threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    // expose C's values with the concrete type so the template can write them
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__lxor_uint64                      // eWiseAdd: C = A+B or C<M> = A+B
(
    GrB_Matrix C,                                   // output matrix
    const int C_sparsity,                           // sparsity structure of C
    const GrB_Matrix M,                             // optional mask (may be NULL)
    const bool Mask_struct,                         // mask is structural (values ignored)
    const bool Mask_comp,                           // mask is complemented
    const GrB_Matrix A,                             // first input
    const GrB_Matrix B,                             // second input
    const bool Ch_is_Mh,                            // C's hyperlist equals M's hyperlist
    const int64_t *GB_RESTRICT C_to_M,              // mapping of C's vectors to M's
    const int64_t *GB_RESTRICT C_to_A,              // mapping of C's vectors to A's
    const int64_t *GB_RESTRICT C_to_B,              // mapping of C's vectors to B's
    const GB_task_struct *GB_RESTRICT TaskList,     // parallel task descriptors
    const int C_ntasks,                             // # of tasks
    const int C_nthreads,                           // # of threads to use
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    // slice arrays; NOTE(review): presumably allocated inside the included
    // template when needed — they are released by GB_FREE_ALL (defined above)
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the LXOR (uint64_t) operator.
// The computation lives in the included template GB_emult_template.c; the
// slicing workspace declared below is filled in by the template and
// released by the GB_FREE_ALL macro (defined earlier in this file).
GrB_Info GB_AemultB__lxor_uint64
(
    GrB_Matrix C,                       // output matrix
    const int C_sparsity,               // requested sparsity structure of C
    const GrB_Matrix M,                 // optional mask (NULL if none)
    const bool Mask_struct,             // structural-mask flag
    const bool Mask_comp,               // complemented-mask flag
    const GrB_Matrix A,                 // first input
    const GrB_Matrix B,                 // second input
    const int64_t *GB_RESTRICT C_to_M,  // vector mapping C -> M
    const int64_t *GB_RESTRICT C_to_A,  // vector mapping C -> A
    const int64_t *GB_RESTRICT C_to_B,  // vector mapping C -> B
    const GB_task_struct *GB_RESTRICT TaskList,     // parallel task descriptors
    const int C_ntasks,                 // number of tasks
    const int C_nthreads,               // number of threads
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A and B; freed via GB_FREE_ALL below
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = lxor (x, Bx [p]) for every present entry p of B: apply the
// binary LXOR operator with the scalar x bound to the first argument.
GrB_Info GB_bind1st__lxor_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the generic input/output arrays
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    const uint64_t x = (*((uint64_t *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Bb, p))
        {
            // z = ((x != 0) != (y != 0))
            uint64_t yval = Bx [p] ;
            Cx [p] = ((x != 0) != (yval != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = lxor (Ax [p], y) for every present entry p of A: apply the
// binary LXOR operator with the scalar y bound to the second argument.
GrB_Info GB_bind2nd__lxor_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the generic input/output arrays
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    const uint64_t y = (*((uint64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            // z = ((x != 0) != (y != 0))
            uint64_t xval = Ax [p] ;
            Cx [p] = ((xval != 0) != (y != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply the LXOR operator with the scalar
// x bound to the first argument.  The transpose loop is textually included
// from GB_unop_transpose.c, which applies the GB_CAST_OP macro (defined
// just above) to each entry.
GrB_Info GB_bind1st_tran__lxor_uint64
(
    GrB_Matrix C,                       // output matrix
    const GB_void *x_input,             // scalar x, a uint64_t
    const GrB_Matrix A,                 // input matrix, to be transposed
    int64_t *GB_RESTRICT *Workspaces,   // workspaces for the transpose
    const int64_t *GB_RESTRICT A_slice, // how A is sliced across threads
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its prior definition for subsequent kernels
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// C = op (A', y): transpose A and apply the LXOR operator with the scalar
// y bound to the second argument.  The transpose loop is textually
// included from GB_unop_transpose.c, which applies the GB_CAST_OP macro
// (defined just above) to each entry.
GrB_Info GB_bind2nd_tran__lxor_uint64
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, to be transposed
    const GB_void *y_input,             // scalar y, a uint64_t
    int64_t *GB_RESTRICT *Workspaces,   // workspaces for the transpose
    const int64_t *GB_RESTRICT A_slice, // how A is sliced across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
cpl_geom_img-test.c | /*
* This file is part of the ESO Common Pipeline Library
* Copyright (C) 2001-2017 European Southern Observatory
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*-----------------------------------------------------------------------------
Includes
-----------------------------------------------------------------------------*/
#include <math.h>
#include <cpl_image_gen.h>
#include <cpl_imagelist_io.h>
#include <cpl_image_io.h>
#include <cpl_image_basic.h>
#include <cpl_bivector.h>
#include <cpl_vector.h>
#include <cpl_memory.h>
#include <cpl_msg.h>
#include <cpl_test.h>
#include <cpl_plot.h>
/* cpl_drand() */
#include <cpl_tools.h>
#include "cpl_apertures.h"
#include "cpl_geom_img.h"
/*-----------------------------------------------------------------------------
Define
-----------------------------------------------------------------------------*/
#ifndef IMAGESZ
#define IMAGESZ 256
#endif
#define NFRAMES 10
#define NSIGMAS 4
#define MAX_SHIFT_ERROR1 15
#define MAX_SHIFT_ERROR2 0.1
/*-----------------------------------------------------------------------------
                            Private functions
-----------------------------------------------------------------------------*/
static void cpl_geom_img_offset_saa_one(cpl_kernel);
static
void cpl_geom_img_offset_saa_bench(cpl_geom_combine, int, int, int, int, int);
static void cpl_imagelist_fill_shifted(cpl_imagelist *, cpl_size,
const double *, const double *);
/**@{*/
/*-----------------------------------------------------------------------------
Main
-----------------------------------------------------------------------------*/
/*
 * Test driver for cpl_geom_img: exercises shift-and-add combination of
 * image lists (cpl_geom_img_offset_saa), offset refinement
 * (cpl_geom_img_offset_fine) and the combined wrapper
 * (cpl_geom_img_offset_combine) for all three geometry modes
 * (INTERSECT, UNION, FIRST) and the value-preserving resampling kernels.
 *
 * Fix: the single-image sanity check below verified the X size of the
 * combined image twice; the second check now verifies the Y size.
 */
int main(void)
{
    /* These kernels preserve the actual pixel-values */
    cpl_kernel kernels[] = {CPL_KERNEL_DEFAULT,
                            CPL_KERNEL_NEAREST};
    const cpl_geom_combine geoms[] = {CPL_GEOM_INTERSECT, CPL_GEOM_UNION,
                                      CPL_GEOM_FIRST};
    /* Shift by non-integer amount to evaluate resampling */
    const double off_x_init[] = { 0.0,  -6.5, -18.5,  54.5,  33.5,
                                 46.5,  -3.5,  36.5,  42.5, -13.5};
    const double off_y_init[] = { 0.0,  13.5,   3.5,   8.5,  32.5,
                                 22.5,  18.5, -56.5,   3.5,  10.5};
    cpl_imagelist  * iset;
    cpl_image      * img;
    cpl_bivector   * offs_est;
    cpl_vector     * off_vec_x;
    cpl_vector     * off_vec_y;
    cpl_bivector   * offs_ref;
    cpl_apertures  * aperts;
    int              naperts;
    cpl_bivector   * aperts_pos;
    cpl_vector     * aperts_pos_x;
    cpl_vector     * aperts_pos_y;
    cpl_vector     * correl;
    const double     psigmas[] = {5, 2, 1, 0.5};
    cpl_vector     * sigmas;
    cpl_image     ** combined;
    int              i;
    cpl_size         pos;

    cpl_test_init(PACKAGE_BUGREPORT, CPL_MSG_WARNING);

    /* Verify the test data */
    cpl_test_eq(sizeof(off_x_init), NFRAMES * sizeof(off_x_init[0]));
    cpl_test_eq(sizeof(off_y_init), sizeof(off_x_init));
    cpl_test_eq(sizeof(psigmas), NSIGMAS * sizeof(psigmas[0]));
    for (i = 0; i < NFRAMES; i++) {
        cpl_test_leq(fabs(off_x_init[i]), IMAGESZ);
        cpl_test_leq(fabs(off_y_init[i]), IMAGESZ);
    }

    cpl_geom_img_offset_saa_one(CPL_KERNEL_DEFAULT);
    cpl_geom_img_offset_saa_one(CPL_KERNEL_NEAREST);

    if (cpl_msg_get_level() <= CPL_MSG_INFO) {
        const double tprev = cpl_test_get_cputime();
        const cpl_flops fprev = cpl_tools_get_flops();
        double tpost, cputime;
        cpl_flops fpost, nflops;
#ifndef _OPENMP
        cpl_geom_img_offset_saa_bench(CPL_GEOM_FIRST, 10, 16, 4*IMAGESZ,
                                      4*IMAGESZ, 0);
        cpl_geom_img_offset_saa_bench(CPL_GEOM_FIRST, 6, 18, 4*IMAGESZ,
                                      4*IMAGESZ, 1);
#endif
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
        for (i=0; i < 8; i++) {
            cpl_geom_img_offset_saa_bench(CPL_GEOM_FIRST, 6, 18, 4*IMAGESZ,
                                          4*IMAGESZ, 1);
        }
        tpost = cpl_test_get_cputime();
        fpost = cpl_tools_get_flops();
        cputime = tpost - tprev;
        nflops = fpost - fprev;
        cpl_msg_info(cpl_func, "Time to benchmark [s]: %g (%g MFLOP/s)",
                     cputime,
                     cputime > 0.0 ? (double)nflops/cputime/1e6 : 0.0);
    } else {
        cpl_geom_img_offset_saa_bench(CPL_GEOM_FIRST, 1, 4, IMAGESZ/4,
                                      IMAGESZ/4, 1);
    }

    /* Bivector with 1 zero-valued element */
    off_vec_x = cpl_vector_new(1);
    cpl_vector_set(off_vec_x, 0, 0.0);
    offs_ref = cpl_bivector_wrap_vectors(off_vec_x, off_vec_x);

    /* Test with empty imagelist: must fail with CPL_ERROR_ILLEGAL_INPUT */
    iset = cpl_imagelist_new();
    combined = cpl_geom_img_offset_saa(iset, offs_ref,
                                       CPL_KERNEL_DEFAULT,
                                       0, 0, CPL_GEOM_FIRST,
                                       NULL, NULL);
    cpl_test_error(CPL_ERROR_ILLEGAL_INPUT);
    cpl_test_null(combined);

    /* Insert one image into imagelist */
    img = cpl_image_fill_test_create(IMAGESZ, IMAGESZ);
    cpl_imagelist_set(iset, img, 0);

    /* With a single image and a zero offset, all modes must reproduce it */
    for (i = 0; i < (int)(sizeof(geoms)/sizeof(geoms[0])); i++) {
        const cpl_geom_combine geom = geoms[i];

        /* Shift and add */
        cpl_msg_info("", "Shift and add single image with geom number %d",
                     (int)geom);
        combined = cpl_geom_img_offset_saa(iset, offs_ref,
                                           CPL_KERNEL_DEFAULT,
                                           0, 0, geom,
                                           NULL, NULL);
        cpl_test_error(CPL_ERROR_NONE);
        cpl_test_nonnull(combined);
        cpl_test_nonnull(combined[0]);
        cpl_test_nonnull(combined[1]);
        cpl_test_eq(cpl_image_get_type(combined[1]), CPL_TYPE_INT);
        cpl_test_eq(cpl_image_get_min(combined[1]), 1);
        cpl_test_eq(cpl_image_get_max(combined[1]), 1);
        cpl_test_zero(cpl_image_count_rejected(combined[0]));
        cpl_test_eq(cpl_image_get_size_x(combined[0]),
                    cpl_image_get_size_x(combined[1]));
        cpl_test_eq(cpl_image_get_size_y(combined[0]),
                    cpl_image_get_size_y(combined[1]));
        cpl_test_image_abs(combined[0], cpl_imagelist_get_const(iset, 0), 0.0);
        cpl_image_delete(combined[0]);
        cpl_image_delete(combined[1]);
        cpl_free(combined);
    }

    cpl_bivector_unwrap_vectors(offs_ref);
    cpl_vector_delete(off_vec_x);

    cpl_imagelist_fill_shifted(iset, NFRAMES-1, off_x_init, off_y_init);
    cpl_test_eq(cpl_imagelist_get_size(iset), NFRAMES);

    /* Not modified */
    off_vec_x = cpl_vector_wrap(NFRAMES, (double*)off_x_init);
    off_vec_y = cpl_vector_wrap(NFRAMES, (double*)off_y_init);

    offs_est = cpl_bivector_new(NFRAMES);
    cpl_vector_copy(cpl_bivector_get_x(offs_est), off_vec_x);
    cpl_vector_copy(cpl_bivector_get_y(offs_est), off_vec_y);

    /* Distort the estimate */
    cpl_vector_add_scalar(cpl_bivector_get_x(offs_est), 2.0);
    cpl_vector_add_scalar(cpl_bivector_get_y(offs_est), -3.0);

    sigmas = cpl_vector_wrap(NSIGMAS, (double*)psigmas); /* Not modified */
    cpl_test_error(CPL_ERROR_NONE);

    /* Get some cross-correlation apertures */
    aperts = cpl_apertures_extract(cpl_imagelist_get_const(iset, 0), sigmas,
                                   &pos);
    cpl_vector_unwrap(sigmas);
    cpl_test_nonnull(aperts);
    naperts = cpl_apertures_get_size(aperts);
    cpl_test_leq(1, naperts);

    cpl_msg_info("","Detected %d apertures at sigma %g (%" CPL_SIZE_FORMAT "/%"
                 CPL_SIZE_FORMAT ")", naperts, psigmas[pos], 1+pos,
                 (cpl_size)NSIGMAS);

    if (cpl_msg_get_level() <= CPL_MSG_DEBUG)
        cpl_apertures_dump(aperts, stdout);

    aperts_pos = cpl_bivector_new(naperts);
    aperts_pos_x = cpl_bivector_get_x(aperts_pos);
    aperts_pos_y = cpl_bivector_get_y(aperts_pos);
    for (i=0; i<naperts; i++) {
        cpl_vector_set(aperts_pos_x, i, cpl_apertures_get_pos_x(aperts, i+1));
        cpl_vector_set(aperts_pos_y, i, cpl_apertures_get_pos_y(aperts, i+1));
    }
    cpl_apertures_delete(aperts);
    cpl_test_error(CPL_ERROR_NONE);

    /* Refine the offsets with cpl_geom_img_offset_fine */
    cpl_msg_info("","Refine the offsets for %d images using %" CPL_SIZE_FORMAT
                 " anchors", NFRAMES, cpl_bivector_get_size(aperts_pos));
    correl = cpl_vector_new(NFRAMES);
    offs_ref = cpl_geom_img_offset_fine(iset, offs_est, aperts_pos,
                                        15, 15, 15, 15, correl);
    cpl_test_nonnull(offs_ref);
    cpl_test_eq(cpl_bivector_get_size(offs_ref), NFRAMES);

    cpl_vector_delete(correl);
    cpl_bivector_delete(offs_est);
    cpl_bivector_delete(aperts_pos);

    /* The refined offsets must be close to the true ones */
    cpl_test_vector_abs(cpl_bivector_get_x(offs_ref), off_vec_x,
                        MAX_SHIFT_ERROR2);
    cpl_test_vector_abs(cpl_bivector_get_y(offs_ref), off_vec_y,
                        MAX_SHIFT_ERROR2);

    cpl_test_nonnull(cpl_vector_unwrap(off_vec_x));
    cpl_test_nonnull(cpl_vector_unwrap(off_vec_y));

    for (i = 0; i < (int)(sizeof(geoms)/sizeof(geoms[0])); i++) {
        const cpl_geom_combine geom = geoms[i];
        const int rejmin = 1;
        const int rejmax = 1;
        const int maximg = NFRAMES - rejmin - rejmax;

        /* Called like this, cpl_geom_img_offset_combine() is just
           a wrapper around cpl_geom_img_offset_saa() */
        cpl_image ** combined2
            = cpl_geom_img_offset_combine(iset, offs_ref, 0, NULL, NULL, NULL,
                                          0, 0, 0, 0, rejmin, rejmax, geom);
        cpl_test_error(CPL_ERROR_NONE);
        cpl_test_nonnull(combined2);

        /* Shift and add */
        cpl_msg_info("", "Shift and add with geom number %d", (int)geom);
        combined = cpl_geom_img_offset_saa(iset, offs_ref, CPL_KERNEL_DEFAULT,
                                           rejmin, rejmax, geom, NULL, NULL);
        cpl_test_error(CPL_ERROR_NONE);
        cpl_test_nonnull(combined);
        if (combined == NULL) continue;

        /* The wrapper and the direct call must agree exactly */
        cpl_test_image_abs(combined[0], combined2[0], 0.0);
        cpl_test_image_abs(combined[1], combined2[1], 0.0);

        cpl_image_delete(combined2[0]);
        cpl_image_delete(combined2[1]);
        cpl_free(combined2);

        cpl_test_eq(cpl_image_get_type(combined[1]), CPL_TYPE_INT);
        if (cpl_image_get_min(combined[1]) == 0) {
            cpl_test(cpl_image_count_rejected(combined[0]));
        } else {
            cpl_test_leq(1, cpl_image_get_min(combined[1]));
            cpl_test_zero(cpl_image_count_rejected(combined[0]));
        }
        cpl_test_eq(cpl_image_get_size_x(combined[0]),
                    cpl_image_get_size_x(combined[1]));
        cpl_test_eq(cpl_image_get_size_y(combined[0]),
                    cpl_image_get_size_y(combined[1]));
        cpl_test_leq(cpl_image_get_max(combined[1]), maximg);
        if (geom == CPL_GEOM_INTERSECT) {
            cpl_test_eq(cpl_image_get_max(combined[1]), maximg);
            cpl_test_leq(1, cpl_image_get_min(combined[1]));
        } else if (geom == CPL_GEOM_FIRST) {
            /* FIXME: Should at least be 1 */
            cpl_test_leq(0, cpl_image_get_min(combined[1]));
        } else if (geom == CPL_GEOM_UNION) {
            cpl_test_leq(0, cpl_image_get_min(combined[1]));
        }
        cpl_msg_info("", "Minimum value in contribution map: %g",
                     cpl_image_get_min(combined[1]));
        cpl_image_delete(combined[0]);
        cpl_image_delete(combined[1]);
        cpl_free(combined);
    }

    /* Shift and add without bad pixels */
    for (i=0; i < NFRAMES; i++) {
        cpl_image_accept_all(cpl_imagelist_get(iset, i));
    }
    for (i = 0; i < (int)(sizeof(geoms)/sizeof(geoms[0])); i++) {
        int ityp;
        for (ityp = 0; ityp < (int)(sizeof(kernels)/sizeof(kernels[0]));
             ityp++) {
            const cpl_geom_combine geom = geoms[i];
            const int rejmin = 1;
            const int rejmax = 1;
            const int maximg = NFRAMES - rejmin - rejmax;

            /* Shift and add */
            cpl_msg_info("", "Shift and add with geom number %d and kernel "
                         "type %d", (int)geom, (int)kernels[ityp]);
            combined = cpl_geom_img_offset_saa(iset, offs_ref, kernels[ityp],
                                               rejmin, rejmax, geom,
                                               NULL, NULL);
            cpl_test_error(CPL_ERROR_NONE);
            cpl_test_nonnull(combined);
            if (combined == NULL) continue;
            cpl_test_eq(cpl_image_get_type(combined[1]), CPL_TYPE_INT);
            if (cpl_image_get_min(combined[1]) == 0) {
                cpl_test(cpl_image_count_rejected(combined[0]));
            } else {
                cpl_test_leq(1, cpl_image_get_min(combined[1]));
                cpl_test_zero(cpl_image_count_rejected(combined[0]));
            }
            cpl_test_eq(cpl_image_get_size_x(combined[0]),
                        cpl_image_get_size_x(combined[1]));
            cpl_test_eq(cpl_image_get_size_y(combined[0]),
                        cpl_image_get_size_y(combined[1]));
            cpl_test_leq(cpl_image_get_max(combined[1]), maximg);
            if (geom == CPL_GEOM_INTERSECT) {
                cpl_test_eq(cpl_image_get_max(combined[1]), maximg);
                cpl_test_leq(1, cpl_image_get_min(combined[1]));
            } else if (geom == CPL_GEOM_FIRST) {
                /* FIXME: Should at least be 1 */
                cpl_test_leq(0, cpl_image_get_min(combined[1]));
            } else if (geom == CPL_GEOM_UNION) {
                cpl_test_leq(0, cpl_image_get_min(combined[1]));
            }
            cpl_msg_info("", "Minimum value in contribution map: %g",
                         cpl_image_get_min(combined[1]));
            cpl_image_delete(combined[0]);
            cpl_image_delete(combined[1]);
            cpl_free(combined);
        }
    }
    cpl_bivector_delete(offs_ref);
    img = cpl_imagelist_unset(iset, 0);
    cpl_imagelist_delete(iset);

    /* Shift and add of two uniform images - with no offsets */
    iset = cpl_imagelist_new();
    cpl_imagelist_set(iset, img, 0);
    cpl_image_threshold(img, 1.0, 1.0, 1.0, 1.0);
    cpl_image_accept_all(img);
    img = cpl_image_duplicate(img);
    cpl_imagelist_set(iset, img, 1);
    off_vec_x = cpl_vector_new(2);
    cpl_vector_fill(off_vec_x, 0.0);
    offs_ref = cpl_bivector_wrap_vectors(off_vec_x,
                                         cpl_vector_duplicate(off_vec_x));
    if (cpl_msg_get_level() <= CPL_MSG_DEBUG)
        cpl_plot_image("","","", img);
    for (i = 0; i < (int)(sizeof(geoms)/sizeof(geoms[0])); i++) {
        int ityp;
        for (ityp = 0; ityp < (int)(sizeof(kernels)/sizeof(kernels[0]));
             ityp++) {
            const cpl_geom_combine geom = geoms[i];
            double pos_x, pos_y;

            cpl_msg_info("", "Shift and add with geom number %d and kernel "
                         "type %d", (int)geom, (int)kernels[ityp]);
            combined = cpl_geom_img_offset_saa(iset, offs_ref, kernels[ityp],
                                               0, 0, geom, &pos_x, &pos_y);
            cpl_test_nonnull(combined);
            if (combined == NULL) continue;
            /* Zero offsets: the combined image keeps the input geometry.
               (Was checking the X size twice; now checks X and Y.) */
            cpl_test_eq(cpl_image_get_size_x(combined[0]), IMAGESZ);
            cpl_test_eq(cpl_image_get_size_y(combined[0]), IMAGESZ);
            cpl_test_eq(cpl_image_get_type(combined[1]), CPL_TYPE_INT);
            cpl_test_eq(cpl_image_get_size_x(combined[0]),
                        cpl_image_get_size_x(combined[1]));
            cpl_test_eq(cpl_image_get_size_y(combined[0]),
                        cpl_image_get_size_y(combined[1]));
            if (cpl_image_get_min(combined[1]) == 0) {
                cpl_test(cpl_image_count_rejected(combined[0]));
            } else {
                cpl_test_leq(1, cpl_image_get_min(combined[1]));
                cpl_test_zero(cpl_image_count_rejected(combined[0]));
            }
            cpl_test_eq(cpl_image_get_max(combined[1]), 2);
            if (geom == CPL_GEOM_INTERSECT) {
                cpl_test_eq(cpl_image_get_max(combined[1]), 2);
                /* FIXME: Should at least be 1 */
                cpl_test_leq(0, cpl_image_get_min(combined[1]));
            } else if (geom == CPL_GEOM_FIRST) {
                /* FIXME: Minimum value is zero */
                cpl_test_leq(0, cpl_image_get_min(combined[1]));
            } else if (geom == CPL_GEOM_UNION) {
                cpl_test_leq(0, cpl_image_get_min(combined[1]));
            }
#ifdef TEST_RESAMPLING
            /* Resampling introduces noise at the edge */
            /* NB: Comparison works for all modes, due to zero offset ... */
            cpl_test_image_abs(combined[0], img, MAX_SHIFT_ERROR2);
#endif
            if (cpl_msg_get_level() <= CPL_MSG_DEBUG) {
                cpl_image_subtract(combined[0], img);
                cpl_plot_image("","","", combined[0]);
            }
            cpl_image_delete(combined[0]);
            cpl_image_delete(combined[1]);
            cpl_free(combined);

            /* Now try to combine two images, the second shifted along the X-axis */
            cpl_image_shift(img, -MAX_SHIFT_ERROR1, 0);
            cpl_image_accept_all(img);
            cpl_vector_set(off_vec_x, 1, MAX_SHIFT_ERROR1);
            combined = cpl_geom_img_offset_saa(iset, offs_ref, kernels[ityp],
                                               0, 0, geom, &pos_x, &pos_y);
            cpl_test_error(CPL_ERROR_NONE);
            cpl_test_nonnull(combined);
            if (combined == NULL) continue;
            cpl_test_eq(cpl_image_get_max(combined[1]), 2);
            if (cpl_image_get_min(combined[1]) == 0) {
                cpl_test(cpl_image_count_rejected(combined[0]));
            } else {
                cpl_test_zero(cpl_image_count_rejected(combined[0]));
            }
            cpl_test_eq(cpl_image_get_size_y(combined[0]), IMAGESZ);
            if (geom == CPL_GEOM_INTERSECT) {
#ifdef SAVE_COMBINED
                cpl_image_save(combined[0], "PI.fits", CPL_TYPE_DOUBLE,
                               NULL, CPL_IO_CREATE);
                cpl_image_save(combined[1], "CI.fits", CPL_TYPE_UCHAR,
                               NULL, CPL_IO_CREATE);
#endif
                cpl_test_eq(cpl_image_get_size_x(combined[0]), IMAGESZ
                            - MAX_SHIFT_ERROR1);
            } else if (geom == CPL_GEOM_FIRST) {
#ifdef SAVE_COMBINED
                cpl_image_save(combined[0], "PF.fits", CPL_TYPE_DOUBLE,
                               NULL, CPL_IO_CREATE);
                cpl_image_save(combined[1], "CF.fits", CPL_TYPE_UCHAR,
                               NULL, CPL_IO_CREATE);
#endif
                cpl_test_eq(cpl_image_get_size_x(combined[0]), IMAGESZ);
            } else if (geom == CPL_GEOM_UNION) {
                cpl_test_eq(cpl_image_get_size_x(combined[0]), IMAGESZ
                            + MAX_SHIFT_ERROR1);
#ifdef SAVE_COMBINED
                cpl_image_save(combined[0], "PU.fits", CPL_TYPE_DOUBLE,
                               NULL, CPL_IO_CREATE);
                cpl_image_save(combined[1], "CU.fits", CPL_TYPE_UCHAR,
                               NULL, CPL_IO_CREATE);
#endif
                cpl_test_eq(cpl_image_get_min(combined[1]), 1);
            }
            img = cpl_imagelist_get(iset, 0);
            if (cpl_msg_get_level() <= CPL_MSG_DEBUG) {
                cpl_plot_image("","","", combined[0]);
                if (geom == CPL_GEOM_FIRST) {
                    cpl_image_subtract(combined[0], img);
                    cpl_plot_image("","","", combined[0]);
                }
            }
            cpl_image_delete(combined[0]);
            cpl_image_delete(combined[1]);
            cpl_free(combined);

            /* Reset offset and 2nd image */
            cpl_vector_fill(off_vec_x, 0.0);
            img = cpl_image_duplicate(img);
            cpl_imagelist_set(iset, img, 1);
        }
    }
    cpl_imagelist_delete(iset);
    cpl_bivector_delete(offs_ref);

    return cpl_test_end(0);
}
/**@}*/
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Benchmark the CPL function
@param mode CPL_GEOM_INTERSECT, CPL_GEOM_UNION, CPL_GEOM_FIRST
@param nr The number of repeats
@param nz The number of planes
@param nx The image X-size
@param ny The image Y-size
@param no The number of outlier pixels to ignore (both min and max)
@return void
*/
/*----------------------------------------------------------------------------*/
static
void cpl_geom_img_offset_saa_bench(cpl_geom_combine mode, int nr, int nz,
                                   int nx, int ny, int no)
{
    /* Build a list of nz randomly shifted copies of a test image, then
       time nr repetitions of cpl_geom_img_offset_saa() on it. */
    cpl_bivector * shifts   = cpl_bivector_new(nz);
    cpl_vector   * shifts_x = cpl_bivector_get_x(shifts);
    cpl_vector   * shifts_y = cpl_bivector_get_y(shifts);
    cpl_image    * imgd     = cpl_image_fill_test_create(nx, ny);
    cpl_image    * img      = cpl_image_cast(imgd, CPL_TYPE_FLOAT);
    cpl_imagelist * imglist = cpl_imagelist_new();
    cpl_image   ** combined;
    double         cputime  = 0.0;
    size_t         bytes    = 0;
    cpl_flops      nflops   = 0;
    int            rep, plane;

    cpl_test_leq(1, nz);

    /* Create bivector of shifts, from 0.4 to 0.6 of the pixel range */
    /* Create list of shifted images */
    cpl_vector_set(shifts_x, 0, 0.0);
    cpl_vector_set(shifts_y, 0, 0.0);
    cpl_image_delete(imgd);
    cpl_imagelist_set(imglist, img, 0);
    for (plane = 1; plane < nz; plane++) {
        cpl_image * copy = cpl_image_duplicate(img);
        const int dx = (int)(0.1 * nx - 0.2 * nx * cpl_drand());
        const int dy = (int)(0.1 * ny - 0.2 * ny * cpl_drand());

        cpl_vector_set(shifts_x, plane, (double)dx);
        cpl_vector_set(shifts_y, plane, (double)dy);

        cpl_image_shift(copy, -dx, -dy);
        cpl_image_accept_all(copy);
        cpl_imagelist_set(imglist, copy, plane);
    }

    if (cpl_msg_get_level() <= CPL_MSG_DEBUG)
        cpl_bivector_dump(shifts, stdout);

    bytes = (size_t)nr * cpl_test_get_bytes_imagelist(imglist);

    for (rep = 0; rep < nr; rep++) {
        /* Accumulate only the time and flops spent inside the call */
        const cpl_flops flops0 = cpl_tools_get_flops();
        const double    secs0  = cpl_test_get_cputime();

        combined = cpl_geom_img_offset_saa(imglist, shifts, CPL_KERNEL_DEFAULT,
                                           no, no, mode, NULL, NULL);

        cpl_test_error(CPL_ERROR_NONE);
        cpl_test_nonnull(combined);

        cputime += cpl_test_get_cputime() - secs0;
        nflops  += cpl_tools_get_flops() - flops0;

        if (combined == NULL) continue;

        cpl_test_nonnull(combined[0]);
        cpl_test_nonnull(combined[1]);
        cpl_image_delete(combined[0]);
        cpl_image_delete(combined[1]);
        cpl_free(combined);
    }

    cpl_msg_info(cpl_func, "Time to benchmark with mode=%d, nr=%d, nz=%d, "
                 "nx=%d, ny=%d, no=%d [s]: %g (%g MFLOP/s)",
                 mode, nr, nz, nx, ny, no, cputime,
                 cputime > 0.0 ? (double)nflops/cputime/1e6 : 0.0);

    if (cputime > 0.0) {
        cpl_msg_info(cpl_func,"Processing rate [MB/s]: %g",
                     1e-6 * (double)bytes / cputime);
    }

    cpl_bivector_delete(shifts);
    cpl_imagelist_delete(imglist);
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Create imagelist of images shifted from the 1st image
@param self Imagelist with one image to append to
@param napp The number of shifted images to append
@param dx The array of n+1 X-shifts (0.0 as 1st element)
@param dy The array of n+1 Y-shifts (0.0 as 1st element)
@return void
@note On return the number of images in self will be n+1
*/
/*----------------------------------------------------------------------------*/
/* Append napp copies of the 1st image of self, each resampled with the
   shift (dx[i], dy[i]) via cpl_image_warp_polynomial().
   Bug fix: ny was computed with cpl_image_get_size_x(), which only worked
   for square images; it now uses cpl_image_get_size_y(). */
static
void cpl_imagelist_fill_shifted(cpl_imagelist * self, cpl_size napp,
                                const double * dx, const double * dy)
{
    const cpl_image * img  = cpl_imagelist_get_const(self, 0);
    const cpl_size    type = cpl_image_get_type  (img);
    const cpl_size    nx   = cpl_image_get_size_x(img);
    const cpl_size    ny   = cpl_image_get_size_y(img); /* was get_size_x() */

    /* Exponent vectors of the 2D polynomial coefficients */
    const cpl_size ishift_0[2] = {0, 0};
    const cpl_size ishift_x[2] = {1, 0};
    const cpl_size ishift_y[2] = {0, 1};

    const double     xyradius  = CPL_KERNEL_DEF_WIDTH;
    cpl_vector     * xyprofile = cpl_vector_new(CPL_KERNEL_DEF_SAMPLES);
    cpl_polynomial * shift_x   = cpl_polynomial_new(2);
    cpl_polynomial * shift_y   = cpl_polynomial_new(2);
    cpl_error_code   error;
    cpl_size         i;

    cpl_test_eq(cpl_imagelist_get_size(self), 1);
    cpl_test_leq(1, napp);
    cpl_test_nonnull(dx);
    cpl_test_nonnull(dy);

    /* Identity transforms */
    error = cpl_polynomial_set_coeff(shift_x, ishift_x, 1.0);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    error = cpl_polynomial_set_coeff(shift_y, ishift_y, 1.0);
    cpl_test_eq_error(error, CPL_ERROR_NONE);

    /* Resampling profile */
    error = cpl_vector_fill_kernel_profile(xyprofile, CPL_KERNEL_DEFAULT,
                                           xyradius);
    cpl_test_eq_error(error, CPL_ERROR_NONE);

    /* Append images to image set */
    for (i=1; i < napp+1; i++) {
        cpl_image * copy = cpl_image_new(nx, ny, type);

        /* Shift in X and Y: set the constant term of each transform */
        error = cpl_polynomial_set_coeff(shift_x, ishift_0, dx[i]);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        error = cpl_polynomial_set_coeff(shift_y, ishift_0, dy[i]);
        cpl_test_eq_error(error, CPL_ERROR_NONE);

        error = cpl_image_warp_polynomial(copy, img, shift_x, shift_y,
                                          xyprofile, xyradius,
                                          xyprofile, xyradius);
        cpl_test_eq_error(error, CPL_ERROR_NONE);

        error = cpl_imagelist_set(self, copy, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
    }

    cpl_polynomial_delete(shift_x);
    cpl_polynomial_delete(shift_y);
    cpl_vector_delete(xyprofile);

    cpl_test_eq(cpl_imagelist_get_size(self), napp+1);
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test the CPL function
@param kernel Kernel type
@return void
  @note  Exercises cpl_geom_img_offset_saa() in CPL_GEOM_INTERSECT mode
*/
/*----------------------------------------------------------------------------*/
static
void cpl_geom_img_offset_saa_one(cpl_kernel kernel)
{
    /* nz flat planes; nz/5 lowest and nz/4 highest values are rejected,
       so the expected mean/contribution can be computed in closed form */
    const int nz = 2 + NFRAMES;
    cpl_imagelist * imglist = cpl_imagelist_new();
    cpl_image    ** combined;
    cpl_bivector  * offset = cpl_bivector_new(nz);
    cpl_vector    * off_x  = cpl_bivector_get_x(offset);
    cpl_vector    * off_y  = cpl_bivector_get_y(offset);
    cpl_error_code  error;
    cpl_size        iz;
    cpl_image     * central0;  /* central part of the combined image */
    cpl_image     * central1;  /* central part of the contribution map */
    for (iz = 0; iz < nz; iz++) {
        cpl_image * img = cpl_image_new(IMAGESZ, IMAGESZ, CPL_TYPE_FLOAT);
        cpl_test_nonnull(img);
        /* Insert flat images with known sum of the non-rejected planes */
        error = cpl_image_add_scalar(img, (double)(nz - iz - nz/5));
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        error = cpl_imagelist_set(imglist, img, iz);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        /* Sub-pixel random offsets; the first plane is unshifted */
        cpl_vector_set(off_x, iz, iz ? cpl_drand() : 0.0);
        cpl_vector_set(off_y, iz, iz ? cpl_drand() : 0.0);
    }
    combined = cpl_geom_img_offset_saa(imglist, offset, kernel, nz/5, nz/4,
                                       CPL_GEOM_INTERSECT, NULL, NULL);
    cpl_test_error(CPL_ERROR_NONE);
    cpl_test_nonnull(combined);
    error = cpl_image_dump_structure(combined[1], stdout);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* Mean of the surviving (flat) planes */
    cpl_test_eq(cpl_image_get_max(combined[0]),
                (nz - nz/5 - nz/4 + 1) /2.0);
    /* Each pixel gets one contribution per surviving plane */
    cpl_test_eq(cpl_image_get_max(combined[1]), nz - nz/5
                - nz/4);
    /* Away from the borders the same values must hold as minima too */
    central0 = cpl_image_extract(combined[0], 3, 3, IMAGESZ - 2, IMAGESZ - 2);
    central1 = cpl_image_extract(combined[1], 3, 3, IMAGESZ - 2, IMAGESZ - 2);
    cpl_test_eq(cpl_image_get_min(central0),
                (nz - nz/5 - nz/4 + 1) /2.0);
    cpl_test_eq(cpl_image_get_min(central1), nz - nz/5 - nz/4);
    cpl_image_delete(combined[0]);
    cpl_image_delete(combined[1]);
    cpl_free(combined);
    cpl_image_delete(central0);
    cpl_image_delete(central1);
    cpl_imagelist_delete(imglist);
    cpl_bivector_delete(offset);
}
|
perturbations.c | /** @file perturbations.c Documented perturbation module
*
* Julien Lesgourgues, 23.09.2010
*
* Deals with the perturbation evolution.
* This module has two purposes:
*
* - at the beginning; to initialize the perturbations, i.e. to
* integrate the perturbation equations, and store temporarily the terms
* contributing to the source functions as a function of conformal
* time. Then, to perform a few manipulations of these terms in order to
* infer the actual source functions \f$ S^{X} (k, \tau) \f$, and to
* store them as a function of conformal time inside an interpolation
* table.
*
* - at any time in the code; to evaluate the source functions at a
* given conformal time (by interpolating within the interpolation
* table).
*
* Hence the following functions can be called from other modules:
*
* -# perturb_init() at the beginning (but after background_init() and thermodynamics_init())
* -# perturb_sources_at_tau() at any later time
* -# perturb_free() at the end, when no more calls to perturb_sources_at_tau() are needed
*/
#include "perturbations.h"
/**
* Source function \f$ S^{X} (k, \tau) \f$ at a given conformal time tau.
*
* Evaluate source functions at given conformal time tau by reading
* the pre-computed table and interpolating.
*
* @param ppt Input: pointer to perturbation structure containing interpolation tables
* @param index_md Input: index of requested mode
* @param index_ic Input: index of requested initial condition
* @param index_type Input: index of requested source function type
* @param tau Input: any value of conformal time
* @param psource Output: vector (already allocated) of source function as a function of k
* @return the error status
*/
int perturb_sources_at_tau(
                           struct perturbs * ppt,
                           int index_md,
                           int index_ic,
                           int index_type,
                           double tau,
                           double * psource
                           ) {

  /** Summary: */

  /** - interpolate in pre-computed table contained in ppt */
  /* The source table for (mode, initial condition, type) is stored as a
     flat array indexed by index_ic*tp_size+index_type; interpolation is
     done along the tau axis, producing one value per wavenumber k.
     Errors are propagated via the class_call macro. */
  class_call(array_interpolate_two_bis(ppt->tau_sampling,
                                       1,
                                       0,
                                       ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type],
                                       ppt->k_size[index_md],
                                       ppt->tau_size,
                                       tau,
                                       psource,
                                       ppt->k_size[index_md],
                                       ppt->error_message),
             ppt->error_message,
             ppt->error_message);

  return _SUCCESS_;
}
/**
* Initialize the perturbs structure, and in particular the table of source functions.
*
* Main steps:
*
* - given the values of the flags describing which kind of
* perturbations should be considered (modes: scalar/vector/tensor,
* initial conditions, type of source functions needed...),
* initialize indices and wavenumber list
*
* - define the time sampling for the output source functions
*
* - for each mode (scalar/vector/tensor): initialize the indices of
* relevant perturbations, integrate the differential system,
* compute and store the source functions.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Output: Initialized perturbation structure
* @return the error status
*/
int perturb_init(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt
) {
/** Summary: */
/** - define local variables */
/* running index for modes */
int index_md;
/* running index for initial conditions */
int index_ic;
/* running index for wavenumbers */
int index_k;
/* pointer to one struct perturb_workspace per thread (one if no openmp) */
struct perturb_workspace ** pppw;
/* background quantities */
double w_fld_ini, w_fld_0,dw_over_da_fld,integral_fld;
/* number of threads (always one if no openmp) */
int number_of_threads=1;
/* index of the thread (always 0 if no openmp) */
int thread=0;
/* This code can be optionally compiled with the openmp option for parallel computation.
Inside parallel regions, the use of the command "return" is forbidden.
For error management, instead of "return _FAILURE_", we will set the variable below
to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the
parallel region. */
int abort;
/* unsigned integer that will be set to the size of the workspace */
size_t sz;
#ifdef _OPENMP
/* instrumentation times */
double tstart, tstop, tspent;
#endif
/** - perform preliminary checks */
/* nothing to do at all if no source function was requested */
if (ppt->has_perturbations == _FALSE_) {
if (ppt->perturbations_verbose > 0)
printf("No sources requested. Perturbation module skipped.\n");
return _SUCCESS_;
}
else {
if (ppt->perturbations_verbose > 0)
printf("Computing sources\n");
}
/* sanity checks on the combination of gauge, approximation schemes
and species requested in input; each class_test aborts with an
explanatory message if its condition is true */
class_test((ppt->gauge == synchronous) && (pba->has_cdm == _FALSE_),
ppt->error_message,
"In the synchronous gauge, it is not self-consistent to assume no CDM: the later is used to define the initial timelike hypersurface. You can either add a negligible amount of CDM or switch to newtonian gauge");
class_test ((ppr->tight_coupling_approximation < first_order_MB) ||
(ppr->tight_coupling_approximation > compromise_CLASS),
ppt->error_message,
"your tight_coupling_approximation is set to %d, out of range defined in perturbations.h",ppr->tight_coupling_approximation);
class_test ((ppr->radiation_streaming_approximation < rsa_null) ||
(ppr->radiation_streaming_approximation > rsa_none),
ppt->error_message,
"your radiation_streaming_approximation is set to %d, out of range defined in perturbations.h",ppr->radiation_streaming_approximation);
if (pba->has_ur == _TRUE_) {
class_test ((ppr->ur_fluid_approximation < ufa_mb) ||
(ppr->ur_fluid_approximation > ufa_none),
ppt->error_message,
"your ur_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ur_fluid_approximation);
}
if (pba->has_ncdm == _TRUE_) {
class_test ((ppr->ncdm_fluid_approximation < ncdmfa_mb) ||
(ppr->ncdm_fluid_approximation > ncdmfa_none),
ppt->error_message,
"your ncdm_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ncdm_fluid_approximation);
if (ppt->has_nc_density == _TRUE_) {
if (ppt->perturbations_verbose > 0) {
fprintf(stdout," -> [WARNING:] You request the number count Cl's in presence of non-cold dark matter.\n Like in all previous CLASS and CLASSgal versions, this will be inferred from the total matter density,\n but it could make much more sense physically to compute it from the CDM+baryon density only.\n To get the latter behavior you would just need to change one line in transfer.c:\n search there for a comment starting with 'use here delta_cb'\n");
}
}
}
if (pba->has_fld == _TRUE_) {
/* check values of w_fld at initial time and today */
class_call(background_w_fld(pba, 0., &w_fld_ini,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
class_call(background_w_fld(pba,pba->a_today,&w_fld_0,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
class_test(w_fld_ini >= 0.,
ppt->error_message,
"The fluid is meant to be negligible at early time, and unimportant for defining the initial conditions of other species. You are using parameters for which this assumption may break down, since at early times you have w_fld(a--->0) = %e >= 0",w_fld_ini);
if (pba->use_ppf == _FALSE_) {
/* without PPF, a sign change of (1+w) between early times and
today signals a phantom crossing, which makes the fluid
perturbation equations divergent */
class_test((w_fld_ini +1.0)*(w_fld_0+1.0) <= 0.0,
ppt->error_message,
"w crosses -1 between the infinite past and today, and this would lead to divergent perturbation equations for the fluid perturbations. Try to switch to PPF scheme: use_ppf = yes");
/* the next check is meaningful at least for w(a) = w0 + wa*(1-a/a0); for general formulas and with use_ppf=no, you may prefer to comment it out... */
class_test((w_fld_0 == -1.) && (dw_over_da_fld == 0.),
ppt->error_message,
"Your choice of a fluid with (w0,wa)=(-1,0) is not valid due to instabilities in the unphysical perturbations of such a fluid. Try instead with a plain cosmological constant or with PPF scheme: use_ppf = yes");
}
}
if (pba->has_dcdm == _TRUE_) {
class_test((ppt->has_cdi == _TRUE_) || (ppt->has_bi == _TRUE_) || (ppt->has_nid == _TRUE_) || (ppt->has_niv == _TRUE_),
ppt->error_message,
"Non-adiabatic initial conditions not coded in presence of decaying dark matter");
}
class_test(ppt->has_vectors == _TRUE_,
ppt->error_message,
"Vectors not coded yet");
if ((ppt->has_niv == _TRUE_) && (ppt->perturbations_verbose > 0)) {
printf("Warning: the niv initial conditions in CLASS (and also in CAMB) should still be double-checked: if you want to do it and send feedback, you are welcome!\n");
}
/* decide which species must be evolved explicitly for tensor modes,
depending on the requested tensor method */
if (ppt->has_tensors == _TRUE_) {
ppt->evolve_tensor_ur = _FALSE_;
ppt->evolve_tensor_ncdm = _FALSE_;
switch (ppt->tensor_method) {
case (tm_photons_only):
break;
case (tm_massless_approximation):
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_))
ppt->evolve_tensor_ur = _TRUE_;
break;
case (tm_exact):
if (pba->has_ur == _TRUE_)
ppt->evolve_tensor_ur = _TRUE_;
if (pba->has_ncdm == _TRUE_)
ppt->evolve_tensor_ncdm = _TRUE_;
break;
}
}
/** - initialize all indices and lists in perturbs structure using perturb_indices_of_perturbs() */
class_call(perturb_indices_of_perturbs(ppr,
pba,
pth,
ppt),
ppt->error_message,
ppt->error_message);
if (ppt->z_max_pk > pth->z_rec) {
class_test(ppt->has_cmb == _TRUE_,
ppt->error_message,
"You requested a very high z_pk=%e, higher than z_rec=%e. This works very well when you don't ask for a calculation of the CMB source function(s). Remove any CMB from your output and try e.g. with 'output=mTk' or 'output=mTk,vTk'",
ppt->z_max_pk,
pth->z_rec);
class_test(ppt->has_source_delta_m == _TRUE_,
ppt->error_message,
"You requested a very high z_pk=%e, higher than z_rec=%e. This works very well when you ask only transfer functions, e.g. with 'output=mTk' or 'output=mTk,vTk'. But if you need the total matter (e.g. with 'mPk', 'dCl', etc.) there is an issue with the calculation of delta_m at very early times. By default, delta_m is a gauge-invariant variable (the density fluctuation in comoving gauge) and this quantity is hard to get accurately at very early times. The solution is to define delta_m as the density fluctuation in the current gauge, synchronous or newtonian. For the moment this must be done manually by commenting the line 'ppw->delta_m += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2;' in perturb_sources(). In the future there will be an option for doing it in an easier way.",
ppt->z_max_pk,
pth->z_rec);
}
/** - define the common time sampling for all sources using
perturb_timesampling_for_sources() */
class_call(perturb_timesampling_for_sources(ppr,
pba,
pth,
ppt),
ppt->error_message,
ppt->error_message);
/** - if we want to store perturbations, write titles and allocate storage */
class_call(perturb_prepare_output(pba,ppt,ppr),
ppt->error_message,
ppt->error_message);
/** - create an array of workspaces in multi-thread case */
#ifdef _OPENMP
/************************/
/* For use with CONCEPT */
/************************/
if (pba->num_threads != -1) {
/**
* Explicitly set the number of OpenMP threads.
* Note that the value of OMP_NUM_THREADS is now completely ignored.
*/
omp_set_num_threads(pba->num_threads);
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/* probe the actual number of threads by opening a throwaway parallel
region; this sizes the array of per-thread workspaces below */
#pragma omp parallel
{
number_of_threads = omp_get_num_threads();
}
#endif
class_alloc(pppw,number_of_threads * sizeof(struct perturb_workspace *),ppt->error_message);
/** - loop over modes (scalar, tensors, etc). For each mode: */
for (index_md = 0; index_md < ppt->md_size; index_md++) {
if (ppt->perturbations_verbose > 1)
printf("Evolving mode %d/%d\n",index_md+1,ppt->md_size);
abort = _FALSE_;
sz = sizeof(struct perturb_workspace);
#pragma omp parallel \
shared(pppw,ppr,pba,pth,ppt,index_md,abort,number_of_threads) \
private(thread) \
num_threads(number_of_threads)
{
#ifdef _OPENMP
thread=omp_get_thread_num();
#endif
/** - --> (a) create a workspace (one per thread in multi-thread case) */
class_alloc_parallel(pppw[thread],sz,ppt->error_message);
/** - --> (b) initialize indices of vectors of perturbations with perturb_indices_of_current_vectors() */
class_call_parallel(perturb_workspace_init(ppr,
pba,
pth,
ppt,
index_md,
pppw[thread]),
ppt->error_message,
ppt->error_message);
} /* end of parallel region */
/* a thread raised an error inside the parallel region: propagate it */
if (abort == _TRUE_) return _FAILURE_;
/** - --> (c) loop over initial conditions and wavenumbers; for each of them, evolve perturbations and compute source functions with perturb_solve() */
for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {
if (ppt->perturbations_verbose > 1) {
printf("Evolving ic %d/%d\n",index_ic+1,ppt->ic_size[index_md]);
printf("evolving %d wavenumbers\n",ppt->k_size[index_md]);
}
abort = _FALSE_;
#pragma omp parallel \
shared(pppw,ppr,pba,pth,ppt,index_md,index_ic,abort,number_of_threads) \
private(index_k,thread,tstart,tstop,tspent) \
num_threads(number_of_threads)
{
#ifdef _OPENMP
thread=omp_get_thread_num();
tspent=0.;
#endif
/* dynamic scheduling: the integration cost varies strongly from one
k to another, so static chunking would load-balance poorly */
#pragma omp for schedule (dynamic)
/* integrating backwards is slightly more optimal for parallel runs */
//for (index_k = 0; index_k < ppt->k_size; index_k++) {
for (index_k = ppt->k_size[index_md]-1; index_k >=0; index_k--) {
/************************/
/* For use with CONCEPT */
/************************/
if ((abort == _FALSE_) && (ppt->perturbations_verbose < 0)) {
printf(
"%*sNode %d, thread %d: Evolving mode k = %.3e/Mpc (%d/%d)\n",
pba->indentation, "",
pba->node,
thread,
ppt->k[index_md][index_k],
index_k+1,
ppt->k_size[index_md]
);
fflush(stdout);
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
if ((ppt->perturbations_verbose > 2) && (abort == _FALSE_)) {
printf("evolving mode k=%e /Mpc (%d/%d)",ppt->k[index_md][index_k],index_k+1,ppt->k_size[index_md]);
if (pba->sgnK != 0)
printf(" (for scalar modes, corresponds to nu=%e)",sqrt(ppt->k[index_md][index_k]*ppt->k[index_md][index_k]+pba->K)/sqrt(pba->sgnK*pba->K));
printf("\n");
}
#ifdef _OPENMP
tstart = omp_get_wtime();
#endif
class_call_parallel(perturb_solve(ppr,
pba,
pth,
ppt,
index_md,
index_ic,
index_k,
pppw[thread]),
ppt->error_message,
ppt->error_message);
#ifdef _OPENMP
tstop = omp_get_wtime();
tspent += tstop-tstart;
#endif
/* make sure every thread sees an abort raised by another thread
as soon as possible, so remaining iterations are skipped */
#pragma omp flush(abort)
} /* end of loop over wavenumbers */
#ifdef _OPENMP
if (ppt->perturbations_verbose>1)
printf("In %s: time spent in parallel region (loop over k's) = %e s for thread %d\n",
__func__,tspent,omp_get_thread_num());
#endif
} /* end of parallel region */
if (abort == _TRUE_) return _FAILURE_;
} /* end of loop over initial conditions */
abort = _FALSE_;
/* free the per-thread workspaces for this mode, again in parallel so
that each thread frees the workspace it allocated */
#pragma omp parallel \
shared(pppw,ppt,index_md,abort,number_of_threads) \
private(thread) \
num_threads(number_of_threads)
{
#ifdef _OPENMP
thread=omp_get_thread_num();
#endif
class_call_parallel(perturb_workspace_free(ppt,index_md,pppw[thread]),
ppt->error_message,
ppt->error_message);
} /* end of parallel region */
if (abort == _TRUE_) return _FAILURE_;
} /* end loop over modes */
free(pppw);
return _SUCCESS_;
}
/**
 * Free all memory space allocated by perturb_init().
 *
 * To be called at the end of each run, only when no further calls to
 * perturb_sources_at_tau() are needed.
 *
 * @param ppt Input: perturbation structure to be freed
 * @return the error status
 */
int perturb_free(
struct perturbs * ppt
) {

  int i_md, i_ic, i_tp, i_file;

  /* nothing was allocated if no perturbations were requested */
  if (ppt->has_perturbations == _TRUE_) {

    /* release every (mode, ic, type) source table, then the per-mode arrays */
    for (i_md = 0; i_md < ppt->md_size; i_md++) {
      for (i_ic = 0; i_ic < ppt->ic_size[i_md]; i_ic++)
        for (i_tp = 0; i_tp < ppt->tp_size[i_md]; i_tp++)
          free(ppt->sources[i_md][i_ic*ppt->tp_size[i_md]+i_tp]);
      free(ppt->sources[i_md]);
      free(ppt->k[i_md]);
    }

    /* release the mode-indexed bookkeeping arrays */
    free(ppt->tau_sampling);
    free(ppt->tp_size);
    free(ppt->ic_size);
    free(ppt->k);
    free(ppt->k_size_cmb);
    free(ppt->k_size_cl);
    free(ppt->k_size);
    free(ppt->sources);

    /** Stuff related to perturbations output: */
    /** - Free non-NULL pointers */
    if (ppt->index_k_output_values != NULL)
      free(ppt->index_k_output_values);

    for (i_file = 0; i_file < _MAX_NUMBER_OF_K_FILES_; i_file++) {
      if (ppt->scalar_perturbations_data[i_file] != NULL)
        free(ppt->scalar_perturbations_data[i_file]);
      if (ppt->vector_perturbations_data[i_file] != NULL)
        free(ppt->vector_perturbations_data[i_file]);
      if (ppt->tensor_perturbations_data[i_file] != NULL)
        free(ppt->tensor_perturbations_data[i_file]);
    }
  }

  return _SUCCESS_;
}
/**
* Initialize all indices and allocate most arrays in perturbs structure.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input/Output: Initialized perturbation structure
* @return the error status
*/
int perturb_indices_of_perturbs(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt
) {
/** Summary: */
/** - define local variables */
int index_type;
int index_md;
int index_ic;
/* number of source types shared by all modes; mode-specific types are
appended after this offset (see index_type_common below) */
int index_type_common;
/** - count modes (scalar, vector, tensor) and assign corresponding indices */
/* class_define_index assigns the next free index to its first argument
and increments the running counter, but only when the flag is true */
index_md = 0;
class_define_index(ppt->index_md_scalars,ppt->has_scalars,index_md,1);
class_define_index(ppt->index_md_vectors,ppt->has_vectors,index_md,1);
class_define_index(ppt->index_md_tensors,ppt->has_tensors,index_md,1);
ppt->md_size = index_md;
class_test(index_md == 0,
ppt->error_message,
"you should have at least one out of {scalars, vectors, tensors} !!!");
/** - allocate array of number of types for each mode, ppt->tp_size[index_md] */
class_alloc(ppt->tp_size,ppt->md_size*sizeof(int),ppt->error_message);
/** - allocate array of number of initial conditions for each mode, ppt->ic_size[index_md] */
class_alloc(ppt->ic_size,ppt->md_size*sizeof(int),ppt->error_message);
/** - allocate array of arrays of source functions for each mode, ppt->source[index_md] */
class_alloc(ppt->sources,ppt->md_size * sizeof(double *),ppt->error_message);
/** - initialization of all flags to false (will eventually be set to true later) */
ppt->has_cmb = _FALSE_;
ppt->has_lss = _FALSE_;
ppt->has_source_t = _FALSE_;
ppt->has_source_p = _FALSE_;
ppt->has_source_delta_m = _FALSE_;
ppt->has_source_delta_cb = _FALSE_;
ppt->has_source_delta_g = _FALSE_;
ppt->has_source_delta_b = _FALSE_;
ppt->has_source_delta_cdm = _FALSE_;
ppt->has_source_delta_dcdm = _FALSE_;
ppt->has_source_delta_fld = _FALSE_;
ppt->has_source_delta_scf = _FALSE_;
ppt->has_source_delta_dr = _FALSE_;
ppt->has_source_delta_ur = _FALSE_;
ppt->has_source_delta_ncdm = _FALSE_;
ppt->has_source_theta_m = _FALSE_;
ppt->has_source_theta_cb = _FALSE_;
ppt->has_source_theta_g = _FALSE_;
ppt->has_source_theta_b = _FALSE_;
ppt->has_source_theta_cdm = _FALSE_;
ppt->has_source_theta_dcdm = _FALSE_;
ppt->has_source_theta_fld = _FALSE_;
ppt->has_source_theta_scf = _FALSE_;
ppt->has_source_theta_dr = _FALSE_;
ppt->has_source_theta_ur = _FALSE_;
ppt->has_source_theta_ncdm = _FALSE_;
ppt->has_source_phi = _FALSE_;
ppt->has_source_phi_prime = _FALSE_;
ppt->has_source_phi_plus_psi = _FALSE_;
ppt->has_source_psi = _FALSE_;
ppt->has_source_h = _FALSE_;
ppt->has_source_h_prime = _FALSE_;
ppt->has_source_eta = _FALSE_;
ppt->has_source_eta_prime = _FALSE_;
/** - source flags and indices, for sources that all modes have in
common (temperature, polarization, ...). For temperature, the
term t2 is always non-zero, while other terms are non-zero only
for scalars and vectors. For polarization, the term e is always
non-zero, while the term b is only for vectors and tensors. */
if (ppt->has_cl_cmb_temperature == _TRUE_) {
ppt->has_source_t = _TRUE_;
ppt->has_cmb = _TRUE_;
}
if (ppt->has_cl_cmb_polarization == _TRUE_) {
ppt->has_source_p = _TRUE_;
ppt->has_cmb = _TRUE_;
}
index_type = 0;
class_define_index(ppt->index_tp_t2,ppt->has_source_t,index_type,1);
class_define_index(ppt->index_tp_p,ppt->has_source_p,index_type,1);
/* remember how many types are common to all modes; each mode restarts
its own type counter from this value further below */
index_type_common = index_type;
/* indices for perturbed recombination */
class_define_index(ppt->index_tp_perturbed_recombination_delta_temp,ppt->has_perturbed_recombination,index_type,1);
class_define_index(ppt->index_tp_perturbed_recombination_delta_chi,ppt->has_perturbed_recombination,index_type,1);
/** - define k values with perturb_get_k_list() */
class_call(perturb_get_k_list(ppr,
pba,
pth,
ppt),
ppt->error_message,
ppt->error_message);
/** - loop over modes. Initialize flags and indices which are specific to each mode. */
for (index_md = 0; index_md < ppt->md_size; index_md++) {
/** - (a) scalars */
if (_scalars_) {
/** - --> source flags and indices, for sources that are specific to scalars */
if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) || (ppt->has_cl_lensing_potential)) {
ppt->has_lss = _TRUE_;
ppt->has_source_phi_plus_psi = _TRUE_;
}
if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_nl_corrections_based_on_delta_m)) {
ppt->has_lss = _TRUE_;
ppt->has_source_delta_m = _TRUE_;
if (pba->has_ncdm == _TRUE_){
ppt->has_source_delta_cb = _TRUE_;
}
}
if (ppt->has_density_transfers == _TRUE_) {
ppt->has_lss = _TRUE_;
ppt->has_source_delta_g = _TRUE_;
ppt->has_source_delta_b = _TRUE_;
if (pba->has_cdm == _TRUE_)
ppt->has_source_delta_cdm = _TRUE_;
if (pba->has_dcdm == _TRUE_)
ppt->has_source_delta_dcdm = _TRUE_;
if (pba->has_fld == _TRUE_)
ppt->has_source_delta_fld = _TRUE_;
if (pba->has_scf == _TRUE_)
ppt->has_source_delta_scf = _TRUE_;
if (pba->has_ur == _TRUE_)
ppt->has_source_delta_ur = _TRUE_;
if (pba->has_dr == _TRUE_)
ppt->has_source_delta_dr = _TRUE_;
if (pba->has_ncdm == _TRUE_)
ppt->has_source_delta_ncdm = _TRUE_;
// Thanks to the following lines, (phi,psi) are also stored as sources
// (Obtained directly in newtonian gauge, inferred from (h,eta) in synchronous gauge).
// If density transfer functions are requested in the (default) CLASS format,
// (phi, psi) will be appended to the delta_i's in the final output.
ppt->has_source_phi = _TRUE_;
ppt->has_source_psi = _TRUE_;
}
if (ppt->has_velocity_transfers == _TRUE_) {
ppt->has_lss = _TRUE_;
ppt->has_source_theta_g = _TRUE_;
ppt->has_source_theta_b = _TRUE_;
/* in synchronous gauge theta_cdm vanishes by construction, so it is
only stored in other gauges */
if ((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous))
ppt->has_source_theta_cdm = _TRUE_;
if (pba->has_dcdm == _TRUE_)
ppt->has_source_theta_dcdm = _TRUE_;
if (pba->has_fld == _TRUE_)
ppt->has_source_theta_fld = _TRUE_;
if (pba->has_scf == _TRUE_)
ppt->has_source_theta_scf = _TRUE_;
if (pba->has_ur == _TRUE_)
ppt->has_source_theta_ur = _TRUE_;
if (pba->has_dr == _TRUE_)
ppt->has_source_theta_dr = _TRUE_;
if (pba->has_ncdm == _TRUE_)
ppt->has_source_theta_ncdm = _TRUE_;
}
if (ppt->has_cl_number_count == _TRUE_) {
ppt->has_lss = _TRUE_;
if (ppt->has_nc_density == _TRUE_) {
ppt->has_source_delta_m = _TRUE_;
}
if (ppt->has_nc_rsd == _TRUE_) {
ppt->has_source_theta_m = _TRUE_;
if (pba->has_ncdm == _TRUE_)
/* we may not need theta_cb at all, rsd always defined for
the total matter, but at least this is made
available */
ppt->has_source_theta_cb = _TRUE_;
}
if (ppt->has_nc_lens == _TRUE_) {
ppt->has_source_phi_plus_psi = _TRUE_;
}
if (ppt->has_nc_gr == _TRUE_) {
ppt->has_source_phi = _TRUE_;
ppt->has_source_psi = _TRUE_;
ppt->has_source_phi_prime = _TRUE_;
ppt->has_source_phi_plus_psi = _TRUE_;
}
}
/* which metric potentials are stored depends on the gauge in use */
if ( ppt->has_metricpotential_transfers == _TRUE_ ) {
if (ppt->gauge == newtonian) {
ppt->has_source_phi = _TRUE_;
ppt->has_source_psi = _TRUE_;
ppt->has_source_phi_prime = _TRUE_;
}
if (ppt->gauge == synchronous) {
ppt->has_source_h = _TRUE_;
ppt->has_source_h_prime = _TRUE_;
ppt->has_source_eta = _TRUE_;
ppt->has_source_eta_prime = _TRUE_;
}
}
/* assign contiguous scalar type indices, starting after the common types */
index_type = index_type_common;
class_define_index(ppt->index_tp_t0, ppt->has_source_t, index_type,1);
class_define_index(ppt->index_tp_t1, ppt->has_source_t, index_type,1);
class_define_index(ppt->index_tp_delta_m, ppt->has_source_delta_m, index_type,1);
class_define_index(ppt->index_tp_delta_cb, ppt->has_source_delta_cb, index_type,1);
class_define_index(ppt->index_tp_delta_g, ppt->has_source_delta_g, index_type,1);
class_define_index(ppt->index_tp_delta_b, ppt->has_source_delta_b, index_type,1);
class_define_index(ppt->index_tp_delta_cdm, ppt->has_source_delta_cdm, index_type,1);
class_define_index(ppt->index_tp_delta_dcdm, ppt->has_source_delta_dcdm,index_type,1);
class_define_index(ppt->index_tp_delta_fld, ppt->has_source_delta_fld, index_type,1);
class_define_index(ppt->index_tp_delta_scf, ppt->has_source_delta_scf, index_type,1);
class_define_index(ppt->index_tp_delta_dr, ppt->has_source_delta_dr, index_type,1);
class_define_index(ppt->index_tp_delta_ur, ppt->has_source_delta_ur, index_type,1);
class_define_index(ppt->index_tp_delta_ncdm1,ppt->has_source_delta_ncdm,index_type,pba->N_ncdm);
class_define_index(ppt->index_tp_theta_m, ppt->has_source_theta_m, index_type,1);
class_define_index(ppt->index_tp_theta_cb, ppt->has_source_theta_cb, index_type,1);
class_define_index(ppt->index_tp_theta_g, ppt->has_source_theta_g, index_type,1);
class_define_index(ppt->index_tp_theta_b, ppt->has_source_theta_b, index_type,1);
class_define_index(ppt->index_tp_theta_cdm, ppt->has_source_theta_cdm, index_type,1);
class_define_index(ppt->index_tp_theta_dcdm, ppt->has_source_theta_dcdm,index_type,1);
class_define_index(ppt->index_tp_theta_fld, ppt->has_source_theta_fld, index_type,1);
class_define_index(ppt->index_tp_theta_scf, ppt->has_source_theta_scf, index_type,1);
class_define_index(ppt->index_tp_theta_dr, ppt->has_source_theta_dr, index_type,1);
class_define_index(ppt->index_tp_theta_ur, ppt->has_source_theta_ur, index_type,1);
class_define_index(ppt->index_tp_theta_ncdm1,ppt->has_source_theta_ncdm,index_type,pba->N_ncdm);
class_define_index(ppt->index_tp_phi, ppt->has_source_phi, index_type,1);
class_define_index(ppt->index_tp_phi_prime, ppt->has_source_phi_prime, index_type,1);
class_define_index(ppt->index_tp_phi_plus_psi,ppt->has_source_phi_plus_psi,index_type,1);
class_define_index(ppt->index_tp_psi, ppt->has_source_psi, index_type,1);
class_define_index(ppt->index_tp_h, ppt->has_source_h, index_type,1);
class_define_index(ppt->index_tp_h_prime, ppt->has_source_h_prime, index_type,1);
class_define_index(ppt->index_tp_eta, ppt->has_source_eta, index_type,1);
class_define_index(ppt->index_tp_eta_prime, ppt->has_source_eta_prime, index_type,1);
ppt->tp_size[index_md] = index_type;
class_test(index_type == 0,
ppt->error_message,
"inconsistent input: you asked for scalars, so you should have at least one non-zero scalar source type (temperature, polarization, lensing/gravitational potential, ...). Please adjust your input.");
/** - --> count scalar initial conditions (for scalars: ad, cdi, nid, niv; for tensors: only one) and assign corresponding indices */
index_ic = 0;
class_define_index(ppt->index_ic_ad, ppt->has_ad, index_ic,1);
class_define_index(ppt->index_ic_bi, ppt->has_bi, index_ic,1);
class_define_index(ppt->index_ic_cdi,ppt->has_cdi,index_ic,1);
class_define_index(ppt->index_ic_nid,ppt->has_nid,index_ic,1);
class_define_index(ppt->index_ic_niv,ppt->has_niv,index_ic,1);
ppt->ic_size[index_md] = index_ic;
class_test(index_ic == 0,
ppt->error_message,
"you should have at least one adiabatic or isocurvature initial condition...} !!!");
}
/** - (b) vectors */
if (_vectors_) {
/** - --> source flags and indices, for sources that are specific to vectors */
index_type = index_type_common;
class_define_index(ppt->index_tp_t1,ppt->has_source_t,index_type,1);
ppt->tp_size[index_md] = index_type;
/*
class_test(index_type == 0,
ppt->error_message,
"inconsistent input: you asked for vectors, so you should have at least one non-zero vector source type (temperature or polarization). Please adjust your input.");
*/
/** - --> initial conditions for vectors*/
index_ic = 0;
/* not coded yet */
ppt->ic_size[index_md] = index_ic;
}
/** - (c) tensors */
if (_tensors_) {
/** - --> source flags and indices, for sources that are specific to tensors */
index_type = index_type_common;
/* nothing specific, unlike for vectors and scalars! */
ppt->tp_size[index_md] = index_type;
/*
class_test(index_type == 0,
ppt->error_message,
"inconsistent input: you asked for tensors, so you should have at least one non-zero tensor source type (temperature or polarization). Please adjust your input.");
*/
/** - --> only one initial condition for tensors*/
index_ic = 0;
class_define_index(ppt->index_ic_ten,_TRUE_,index_ic,1);
ppt->ic_size[index_md] = index_ic;
}
/** - (d) for each mode, allocate array of arrays of source functions for each initial conditions and wavenumber, (ppt->source[index_md])[index_ic][index_type] */
class_alloc(ppt->sources[index_md],
ppt->ic_size[index_md] * ppt->tp_size[index_md] * sizeof(double *),
ppt->error_message);
}
return _SUCCESS_;
}
/**
* Define time sampling for source functions.
*
* For each type, compute the list of values of tau at which sources
* will be sampled. Knowing the number of tau values, allocate all
* arrays of source functions.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input/Output: Initialized perturbation structure
* @return the error status
*/
int perturb_timesampling_for_sources(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt
) {
/** Summary: */
/** - define local variables */
int counter;
int index_md;
int index_type;
int index_ic;
int last_index_back;
int last_index_thermo;
int first_index_back;
int first_index_thermo;
double tau;
double tau_ini;
double tau_lower;
double tau_upper;
double tau_mid;
double timescale_source;
double rate_thermo;
double rate_isw_squared;
double a_prime_over_a;
double a_primeprime_over_a;
double * pvecback;
double * pvecthermo;
/** - allocate background/thermodynamics vectors */
class_alloc(pvecback,pba->bg_size_short*sizeof(double),ppt->error_message);
class_alloc(pvecthermo,pth->th_size*sizeof(double),ppt->error_message);
/** - first, just count the number of sampling points in order to allocate the array containing all values */
/* NOTE: the sampling is deterministic and is computed twice on purpose:
this first pass only counts the points (to size the array), the
second pass below repeats the identical steps to fill it. Keep the
two passes strictly in sync when modifying either. */
/** - (a) if CMB requested, first sampling point = when the universe
stops being opaque; otherwise, start sampling gravitational
potential at recombination [however, if perturbed recombination
is requested, we also need to start the system before
recombination. Otherwise, the initial conditions for gas
temperature and ionization fraction perturbations (delta_T = 1/3
delta_b, delta_x_e) are not valid]. */
if ((ppt->has_cmb == _TRUE_)||(ppt->has_perturbed_recombination == _TRUE_)) {
/* using bisection, search time tau such that the ratio of thermo
to Hubble time scales tau_c/tau_h=aH/kappa' is equal to
start_sources_at_tau_c_over_tau_h */
tau_lower = pth->tau_ini;
class_call(background_at_tau(pba,
tau_lower,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
/* check that the bracket [tau_lower, tau_upper] actually contains
the target ratio before bisecting */
class_test(pvecback[pba->index_bg_a]*
pvecback[pba->index_bg_H]/
pvecthermo[pth->index_th_dkappa] >
ppr->start_sources_at_tau_c_over_tau_h,
ppt->error_message,
"your choice of initial time for computing sources is inappropriate: it corresponds to an earlier time than the one at which the integration of thermodynamical variables started (tau=%g). You should increase either 'start_sources_at_tau_c_over_tau_h' or 'recfast_z_initial'\n",
tau_lower);
tau_upper = pth->tau_rec;
class_call(background_at_tau(pba,
tau_upper,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
class_test(pvecback[pba->index_bg_a]*
pvecback[pba->index_bg_H]/
pvecthermo[pth->index_th_dkappa] <
ppr->start_sources_at_tau_c_over_tau_h,
ppt->error_message,
"your choice of initial time for computing sources is inappropriate: it corresponds to a time after recombination. You should decrease 'start_sources_at_tau_c_over_tau_h'\n");
/* bisection proper: narrow the bracket until it is shorter than
tol_tau_approx */
tau_mid = 0.5*(tau_lower + tau_upper);
while (tau_upper - tau_lower > ppr->tol_tau_approx) {
class_call(background_at_tau(pba,
tau_mid,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
if (pvecback[pba->index_bg_a]*
pvecback[pba->index_bg_H]/
pvecthermo[pth->index_th_dkappa] >
ppr->start_sources_at_tau_c_over_tau_h)
tau_upper = tau_mid;
else
tau_lower = tau_mid;
tau_mid = 0.5*(tau_lower + tau_upper);
}
tau_ini = tau_mid;
}
else {
/* check the time corresponding to the highest redshift requested in output plus one */
class_call(background_tau_of_z(pba,
ppt->z_max_pk+1,
&tau_ini),
pba->error_message,
ppt->error_message);
/* obsolete: previous choice was to start always at recombination time */
/* tau_ini = pth->tau_rec; */
/* set values of first_index_back/thermo */
class_call(background_at_tau(pba,
tau_ini,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
}
/** - (b) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where:
- --> if CMB requested:
timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$;
timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$ (to sample correctly the late ISW effect; and
timescale_source=1/(1/timescale_source1+1/timescale_source2); repeat till today.
- --> if CMB not requested:
timescale_source = 1/aH; repeat till today.
*/
/* counter starts at 1 because the initial point tau_ini is part of the sampling */
counter = 1;
last_index_back = first_index_back;
last_index_thermo = first_index_thermo;
tau = tau_ini;
while (tau < pba->conformal_age) {
class_call(background_at_tau(pba,
tau,
pba->short_info,
pba->inter_closeby,
&last_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_closeby,
&last_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
if (ppt->has_cmb == _TRUE_) {
/* variation rate of thermodynamics variables */
rate_thermo = pvecthermo[pth->index_th_rate];
/* variation rate of metric due to late ISW effect (important at late times) */
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
+ 2. * a_prime_over_a * a_prime_over_a;
rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a);
/* compute rate */
timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared);
}
else {
/* variation rate given by Hubble time */
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
timescale_source = a_prime_over_a;
}
/* check it is non-zero */
class_test(timescale_source == 0.,
ppt->error_message,
"null evolution rate, integration is diverging");
/* compute inverse rate */
timescale_source = 1./timescale_source;
class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation,
ppt->error_message,
"integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source);
tau = tau + ppr->perturb_sampling_stepsize*timescale_source;
counter++;
}
/** - --> infer total number of time steps, ppt->tau_size */
ppt->tau_size = counter;
/** - --> allocate array of time steps, ppt->tau_sampling[index_tau] */
class_alloc(ppt->tau_sampling,ppt->tau_size * sizeof(double),ppt->error_message);
/** - --> repeat the same steps, now filling the array with each tau value: */
/* second pass: identical stepping as the counting pass above, so exactly
ppt->tau_size values are produced */
/** - --> (b.1.) first sampling point = when the universe stops being opaque */
counter = 0;
ppt->tau_sampling[counter]=tau_ini;
/** - --> (b.2.) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where
timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$;
timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$ (to sample correctly the late ISW effect; and
timescale_source=1/(1/timescale_source1+1/timescale_source2); repeat till today.
If CMB not requested:
timescale_source = 1/aH; repeat till today. */
last_index_back = first_index_back;
last_index_thermo = first_index_thermo;
tau = tau_ini;
while (tau < pba->conformal_age) {
class_call(background_at_tau(pba,
tau,
pba->short_info,
pba->inter_closeby,
&last_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_closeby,
&last_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
if (ppt->has_cmb == _TRUE_) {
/* variation rate of thermodynamics variables */
rate_thermo = pvecthermo[pth->index_th_rate];
/* variation rate of metric due to late ISW effect (important at late times) */
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
+ 2. * a_prime_over_a * a_prime_over_a;
rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a);
/* compute rate */
timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared);
}
else {
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
timescale_source = a_prime_over_a;
}
/* check it is non-zero */
class_test(timescale_source == 0.,
ppt->error_message,
"null evolution rate, integration is diverging");
/* compute inverse rate */
timescale_source = 1./timescale_source;
class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation,
ppt->error_message,
"integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source);
tau = tau + ppr->perturb_sampling_stepsize*timescale_source;
counter++;
ppt->tau_sampling[counter]=tau;
}
/** - last sampling point = exactly today */
ppt->tau_sampling[counter] = pba->conformal_age;
free(pvecback);
free(pvecthermo);
/** - loop over modes, initial conditions and types. For each of
them, allocate array of source functions. */
for (index_md = 0; index_md < ppt->md_size; index_md++) {
for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {
for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) {
class_alloc(ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type],
ppt->k_size[index_md] * ppt->tau_size * sizeof(double),
ppt->error_message);
}
}
}
return _SUCCESS_;
}
/**
* Define the number of comoving wavenumbers using the information
* passed in the precision structure.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input: pointer to perturbation structure
* @return the error status
*/
int perturb_get_k_list(
                       struct precision * ppr,
                       struct background * pba,
                       struct thermo * pth,
                       struct perturbs * ppt
                       ) {

  /** Summary: build, for each requested mode (scalar/vector/tensor), the
      list of comoving wavenumbers ppt->k[index_md], together with the
      sub-sizes k_size_cmb (needed for CMB C_l's), k_size_cl (needed for any
      C_l) and k_size (full list, possibly extended for P(k)). */

  int index_k, index_k_output, index_mode;
  double k,k_min=0.,k_rec,step,tau1;
  double * k_max_cmb;  /* per mode: largest k needed for the CMB C_l's */
  double * k_max_cl;   /* per mode: largest k needed for any C_l (number count, lensing, ...) */
  double k_max=0.;
  double scale2;
  double *tmp_k_list;
  int newk_size, index_newk, add_k_output_value;

  class_test(ppr->k_step_transition == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  class_test(pth->rs_rec == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  /** - allocate arrays related to k list for each mode */

  class_alloc(ppt->k_size_cmb,
              ppt->md_size*sizeof(int),
              ppt->error_message);
  class_alloc(ppt->k_size_cl,
              ppt->md_size*sizeof(int),
              ppt->error_message);
  class_alloc(ppt->k_size,
              ppt->md_size*sizeof(int),
              ppt->error_message);
  class_alloc(ppt->k,
              ppt->md_size*sizeof(double*),
              ppt->error_message);

  class_calloc(k_max_cmb,
               ppt->md_size,
               sizeof(double),
               ppt->error_message);
  class_calloc(k_max_cl,
               ppt->md_size,
               sizeof(double),
               ppt->error_message);

  /** - scalar modes */

  if (ppt->has_scalars == _TRUE_) {

    /* first value */
    if (pba->sgnK == 0) {
      /* K=0 (flat) : start close to zero */
      k_min=ppr->k_min_tau0/pba->conformal_age;
    }
    else if (pba->sgnK == -1) {
      /* K<0 (open) : start close to sqrt(-K)
         (in transfer modules, for scalars, this will correspond to q close to zero;
         for vectors and tensors, this value is even smaller than the minimum necessary value) */
      k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2));
    }
    else if (pba->sgnK == 1) {
      /* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */
      k_min = sqrt((8.-1.e-4)*pba->K);
    }

    /** - --> find k_max (as well as k_max_cmb[ppt->index_md_scalars], k_max_cl[ppt->index_md_scalars]) */

    k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */

    k_max_cmb[ppt->index_md_scalars] = k_min;
    k_max_cl[ppt->index_md_scalars] = k_min;
    k_max = k_min;

    if (ppt->has_cls == _TRUE_) {

      /* find k_max_cmb[ppt->index_md_scalars] : */

      /* choose a k_max_cmb[ppt->index_md_scalars] corresponding to a wavelength on the last
         scattering surface seen today under an angle smaller than
         pi/lmax: this is equivalent to
         k_max_cl[ppt->index_md_scalars]*[comoving.ang.diameter.distance] > l_max */

      k_max_cmb[ppt->index_md_scalars] = ppr->k_max_tau0_over_l_max*ppt->l_scalar_max
        /pba->conformal_age/pth->angular_rescaling;
      k_max_cl[ppt->index_md_scalars] = k_max_cmb[ppt->index_md_scalars];
      k_max = k_max_cmb[ppt->index_md_scalars];

      /* find k_max_cl[ppt->index_md_scalars] : */

      /* if we need density/lensing Cl's, we must impose a stronger condition,
         such that the minimum wavelength on the shell corresponding
         to the center of smallest redshift bin is seen under an
         angle smaller than pi/lmax. So we must multiply our previous
         k_max_cl[ppt->index_md_scalars] by the ratio tau0/(tau0-tau[center of smallest
         redshift bin]). Note that we could do the same with the
         lensing potential if we needed a very precise C_l^phi-phi at
         large l. We don't do it by default, because the lensed ClT,
         ClE would be marginally affected. */

      if ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_)) {

        class_call(background_tau_of_z(pba,
                                       ppt->selection_mean[0],
                                       &tau1),
                   pba->error_message,
                   ppt->error_message);

        k_max_cl[ppt->index_md_scalars] = MAX(k_max_cl[ppt->index_md_scalars],ppr->k_max_tau0_over_l_max*ppt->l_lss_max/(pba->conformal_age-tau1)); // to be very accurate we should use angular diameter distance to given redshift instead of comoving radius: would implement corrections depending on curvature
        k_max = k_max_cl[ppt->index_md_scalars];
      }
    }

    /* find k_max: */

    if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_))
      k_max = MAX(k_max,ppt->k_max_for_pk);

    if (ppt->has_nl_corrections_based_on_delta_m == _TRUE_)
      k_max = MAX(k_max,ppr->halofit_min_k_max);

    /** - --> test that result for k_min, k_max make sense */

    class_test(k_min<0.,
               ppt->error_message,
               "buggy definition of k_min");

    class_test(k_max<0.,
               ppt->error_message,
               "buggy definition of k_max");

    class_test(k_max<k_min,
               ppt->error_message,
               "buggy definition of k_min and/or k_max");

    /* if K>0, the transfer function will be calculated for discrete
       integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and
       m=0,1,2 for scalars/vectors/tensors. However we are free to
       define in the perturbation module some arbitrary values of k:
       later on, the transfer module will interpolate at values of k
       corresponding exactly to integer values of nu. Hence, apart
       from the value of k_min and the step size in the vicinity of
       k_min, we define exactly the same sampling in the three cases
       K=0, K<0, K>0 */

    /* allocate array with, for the moment, the largest possible size */
    class_alloc(ppt->k[ppt->index_md_scalars],
                ((int)((k_max_cmb[ppt->index_md_scalars]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+
                 (int)(MAX(ppr->k_per_decade_for_pk,ppr->k_per_decade_for_bao)*log(k_max/k_min)/log(10.))+3)
                *sizeof(double),ppt->error_message);

    /* first value */

    index_k=0;
    k = k_min;
    ppt->k[ppt->index_md_scalars][index_k] = k;
    index_k++;

    /* values until k_max_cmb[ppt->index_md_scalars] */

    while (k < k_max_cmb[ppt->index_md_scalars]) {

      /* the linear step is not constant, it has a step-like shape,
         centered around the characteristic scale set by the sound
         horizon at recombination (associated to the comoving wavenumber
         k_rec) */

      step = (ppr->k_step_super
              + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.)
              * (ppr->k_step_sub-ppr->k_step_super)) * k_rec;

      /* there is one other thing to take into account in the step
         size. There are two other characteristic scales that matter for
         the sampling: the Hubble scale today, k0=a0H0, and eventually
         curvature scale sqrt(|K|). We define "scale2" as the sum of the
         squared Hubble radius and squared curvature radius. We need to
         increase the sampling for k<sqrt(scale2), in order to get the
         first multipoles accurate enough. The formula below reduces it
         gradually in the k-->0 limit, by up to a factor 10. The actual
         stepsize is still fixed by k_step_super, this is just a
         reduction factor. */

      scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K);

      step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction);

      class_test(step / k < ppr->smallest_allowed_variation,
                 ppt->error_message,
                 "k step =%e < machine precision : leads either to numerical error or infinite loop",
                 step * k_rec);

      k += step;

      class_test(k <= ppt->k[ppt->index_md_scalars][index_k-1],
                 ppt->error_message,
                 "consecutive values of k should differ and should be in growing order");

      ppt->k[ppt->index_md_scalars][index_k] = k;
      index_k++;
    }

    ppt->k_size_cmb[ppt->index_md_scalars] = index_k;

    /* values until k_max_cl[ppt->index_md_scalars] */

    while (k < k_max_cl[ppt->index_md_scalars]) {

      k *= pow(10.,1./(ppr->k_per_decade_for_pk
                       +(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk)
                       *(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4)))));

      ppt->k[ppt->index_md_scalars][index_k] = k;
      index_k++;
    }

    ppt->k_size_cl[ppt->index_md_scalars] = index_k;

    /* values until k_max */

    while (k < k_max) {

      k *= pow(10.,1./(ppr->k_per_decade_for_pk
                       +(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk)
                       *(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4)))));

      ppt->k[ppt->index_md_scalars][index_k] = k;
      index_k++;
    }

    ppt->k_size[ppt->index_md_scalars] = index_k;

    class_realloc(ppt->k[ppt->index_md_scalars],
                  ppt->k[ppt->index_md_scalars],
                  ppt->k_size[ppt->index_md_scalars]*sizeof(double),
                  ppt->error_message);
  }

  /** - vector modes */

  if (ppt->has_vectors == _TRUE_) {

    /* first value */
    if (pba->sgnK == 0) {
      /* K=0 (flat) : start close to zero */
      k_min=ppr->k_min_tau0/pba->conformal_age;
    }
    else if (pba->sgnK == -1) {
      /* K<0 (open) : start close to sqrt(-K)
         (in transfer modules, for scalars, this will correspond to q close to zero;
         for vectors and tensors, this value is even smaller than the minimum necessary value) */
      k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2));
    }
    else if (pba->sgnK == 1) {
      /* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */
      k_min = sqrt((7.-1.e-4)*pba->K);
    }

    /** - --> find k_max (as well as k_max_cmb[ppt->index_md_vectors], k_max_cl[ppt->index_md_vectors]) */

    k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */

    k_max_cmb[ppt->index_md_vectors] = k_min;
    k_max_cl[ppt->index_md_vectors] = k_min;
    k_max = k_min;

    if (ppt->has_cls == _TRUE_) {

      /* find k_max_cmb: */

      /* choose a k_max_cmb corresponding to a wavelength on the last
         scattering surface seen today under an angle smaller than
         pi/lmax: this is equivalent to
         k_max_cl*[comoving.ang.diameter.distance] > l_max */

      k_max_cmb[ppt->index_md_vectors] = ppr->k_max_tau0_over_l_max*ppt->l_vector_max
        /pba->conformal_age/pth->angular_rescaling;
      k_max_cl[ppt->index_md_vectors] = k_max_cmb[ppt->index_md_vectors];
      k_max = k_max_cmb[ppt->index_md_vectors];
    }

    /** - --> test that result for k_min, k_max make sense */

    class_test(k_min<0.,
               ppt->error_message,
               "buggy definition of k_min");

    class_test(k_max<0.,
               ppt->error_message,
               "buggy definition of k_max");

    class_test(k_max<k_min,
               ppt->error_message,
               "buggy definition of k_min and/or k_max");

    /* if K>0, the transfer function will be calculated for discrete
       integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and
       m=0,1,2 for scalars/vectors/tensors. However we are free to
       define in the perturbation module some arbitrary values of k:
       later on, the transfer module will interpolate at values of k
       corresponding exactly to integer values of nu. Hence, apart
       from the value of k_min and the step size in the vicinity of
       k_min, we define exactly the same sampling in the three cases
       K=0, K<0, K>0 */

    /* allocate array with, for the moment, the largest possible size */
    class_alloc(ppt->k[ppt->index_md_vectors],
                ((int)((k_max_cmb[ppt->index_md_vectors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1)
                *sizeof(double),ppt->error_message);

    /* first value */

    index_k=0;
    k = k_min;
    ppt->k[ppt->index_md_vectors][index_k] = k;
    index_k++;

    /* values until k_max_cmb[ppt->index_md_vectors] */

    while (k < k_max_cmb[ppt->index_md_vectors]) {

      /* the linear step is not constant, it has a step-like shape,
         centered around the characteristic scale set by the sound
         horizon at recombination (associated to the comoving wavenumber
         k_rec) */

      step = (ppr->k_step_super
              + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.)
              * (ppr->k_step_sub-ppr->k_step_super)) * k_rec;

      /* there is one other thing to take into account in the step
         size. There are two other characteristic scales that matter for
         the sampling: the Hubble scale today, k0=a0H0, and eventually
         curvature scale sqrt(|K|). We define "scale2" as the sum of the
         squared Hubble radius and squared curvature radius. We need to
         increase the sampling for k<sqrt(scale2), in order to get the
         first multipoles accurate enough. The formula below reduces it
         gradually in the k-->0 limit, by up to a factor 10. The actual
         stepsize is still fixed by k_step_super, this is just a
         reduction factor. */

      scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K);

      step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction);

      class_test(step / k < ppr->smallest_allowed_variation,
                 ppt->error_message,
                 "k step =%e < machine precision : leads either to numerical error or infinite loop",
                 step * k_rec);

      k += step;

      /* bug fix: compare against the previous entry of the VECTOR k list
         (this used to read the scalar list, a copy-paste error which also
         dereferenced an unallocated array when scalars were not requested) */
      class_test(k <= ppt->k[ppt->index_md_vectors][index_k-1],
                 ppt->error_message,
                 "consecutive values of k should differ and should be in growing order");

      ppt->k[ppt->index_md_vectors][index_k] = k;
      index_k++;
    }

    ppt->k_size_cmb[ppt->index_md_vectors] = index_k;
    ppt->k_size_cl[ppt->index_md_vectors] = index_k;
    ppt->k_size[ppt->index_md_vectors] = index_k;

    class_realloc(ppt->k[ppt->index_md_vectors],
                  ppt->k[ppt->index_md_vectors],
                  ppt->k_size[ppt->index_md_vectors]*sizeof(double),
                  ppt->error_message);
  }

  /** - tensor modes */

  if (ppt->has_tensors == _TRUE_) {

    /* first value */
    if (pba->sgnK == 0) {
      /* K=0 (flat) : start close to zero */
      k_min=ppr->k_min_tau0/pba->conformal_age;
    }
    else if (pba->sgnK == -1) {
      /* K<0 (open) : start close to sqrt(-K)
         (in transfer modules, for scalars, this will correspond to q close to zero;
         for vectors and tensors, this value is even smaller than the minimum necessary value) */
      k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2));
    }
    else if (pba->sgnK == 1) {
      /* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */
      k_min = sqrt((6.-1.e-4)*pba->K);
    }

    /** - --> find k_max (as well as k_max_cmb[ppt->index_md_tensors], k_max_cl[ppt->index_md_tensors]) */

    k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */

    k_max_cmb[ppt->index_md_tensors] = k_min;
    k_max_cl[ppt->index_md_tensors] = k_min;
    k_max = k_min;

    if (ppt->has_cls == _TRUE_) {

      /* find k_max_cmb[ppt->index_md_tensors]: */

      /* choose a k_max_cmb[ppt->index_md_tensors] corresponding to a wavelength on the last
         scattering surface seen today under an angle smaller than
         pi/lmax: this is equivalent to
         k_max_cl[ppt->index_md_tensors]*[comoving.ang.diameter.distance] > l_max */

      k_max_cmb[ppt->index_md_tensors] = ppr->k_max_tau0_over_l_max*ppt->l_tensor_max
        /pba->conformal_age/pth->angular_rescaling;
      k_max_cl[ppt->index_md_tensors] = k_max_cmb[ppt->index_md_tensors];
      k_max = k_max_cmb[ppt->index_md_tensors];
    }

    /** - --> test that result for k_min, k_max make sense */

    class_test(k_min<0.,
               ppt->error_message,
               "buggy definition of k_min");

    class_test(k_max<0.,
               ppt->error_message,
               "buggy definition of k_max");

    class_test(k_max<k_min,
               ppt->error_message,
               "buggy definition of k_min and/or k_max");

    /* if K>0, the transfer function will be calculated for discrete
       integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and
       m=0,1,2 for scalars/vectors/tensors. However we are free to
       define in the perturbation module some arbitrary values of k:
       later on, the transfer module will interpolate at values of k
       corresponding exactly to integer values of nu. Hence, apart
       from the value of k_min and the step size in the vicinity of
       k_min, we define exactly the same sampling in the three cases
       K=0, K<0, K>0 */

    /* allocate array with, for the moment, the largest possible size */
    class_alloc(ppt->k[ppt->index_md_tensors],
                ((int)((k_max_cmb[ppt->index_md_tensors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1)
                *sizeof(double),ppt->error_message);

    /* first value */

    index_k=0;
    k = k_min;
    ppt->k[ppt->index_md_tensors][index_k] = k;
    index_k++;

    /* values until k_max_cmb[ppt->index_md_tensors] */

    while (k < k_max_cmb[ppt->index_md_tensors]) {

      /* the linear step is not constant, it has a step-like shape,
         centered around the characteristic scale set by the sound
         horizon at recombination (associated to the comoving wavenumber
         k_rec) */

      step = (ppr->k_step_super
              + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.)
              * (ppr->k_step_sub-ppr->k_step_super)) * k_rec;

      /* there is one other thing to take into account in the step
         size. There are two other characteristic scales that matter for
         the sampling: the Hubble scale today, k0=a0H0, and eventually
         curvature scale sqrt(|K|). We define "scale2" as the sum of the
         squared Hubble radius and squared curvature radius. We need to
         increase the sampling for k<sqrt(scale2), in order to get the
         first multipoles accurate enough. The formula below reduces it
         gradually in the k-->0 limit, by up to a factor 10. The actual
         stepsize is still fixed by k_step_super, this is just a
         reduction factor. */

      scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K);

      step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction);

      class_test(step / k < ppr->smallest_allowed_variation,
                 ppt->error_message,
                 "k step =%e < machine precision : leads either to numerical error or infinite loop",
                 step * k_rec);

      k += step;

      class_test(k <= ppt->k[ppt->index_md_tensors][index_k-1],
                 ppt->error_message,
                 "consecutive values of k should differ and should be in growing order");

      ppt->k[ppt->index_md_tensors][index_k] = k;
      index_k++;
    }

    ppt->k_size_cmb[ppt->index_md_tensors] = index_k;
    ppt->k_size_cl[ppt->index_md_tensors] = index_k;
    ppt->k_size[ppt->index_md_tensors] = index_k;

    class_realloc(ppt->k[ppt->index_md_tensors],
                  ppt->k[ppt->index_md_tensors],
                  ppt->k_size[ppt->index_md_tensors]*sizeof(double),
                  ppt->error_message);
  }

  /** - If user asked for k_output_values, add those to all k lists: */
  if (ppt->k_output_values_num>0){

    /* Allocate storage */
    class_alloc(ppt->index_k_output_values,sizeof(double)*ppt->md_size*ppt->k_output_values_num,ppt->error_message);

    /** - --> Find indices in ppt->k[index_md] corresponding to 'k_output_values'.
        We are assuming that ppt->k is sorted and growing, and we have made sure
        that ppt->k_output_values is also sorted and growing.*/
    for (index_mode=0; index_mode<ppt->md_size; index_mode++){

      newk_size = ppt->k_size[index_mode]+ppt->k_output_values_num;

      class_alloc(tmp_k_list,sizeof(double)*newk_size,ppt->error_message);

      /* merge the (sorted) existing k list with the (sorted) requested
         output values, remembering where each output value lands */
      index_k=0;
      index_k_output=0;
      for (index_newk=0; index_newk<newk_size; index_newk++){
        /** - --> Decide if we should add k_output_value now. This has to be this complicated, since we
            can only compare the k-values when both indices are in range.*/
        if (index_k >= ppt->k_size[index_mode])
          add_k_output_value = _TRUE_;
        else if (index_k_output >= ppt->k_output_values_num)
          add_k_output_value = _FALSE_;
        else if (ppt->k_output_values[index_k_output] < ppt->k[index_mode][index_k])
          add_k_output_value = _TRUE_;
        else
          add_k_output_value = _FALSE_;

        if (add_k_output_value == _TRUE_){
          tmp_k_list[index_newk] = ppt->k_output_values[index_k_output];
          ppt->index_k_output_values[index_mode*ppt->k_output_values_num+index_k_output]=index_newk;
          index_k_output++;
        }
        else{
          tmp_k_list[index_newk] = ppt->k[index_mode][index_k];
          index_k++;
        }
      }

      free(ppt->k[index_mode]);
      ppt->k[index_mode] = tmp_k_list;
      ppt->k_size[index_mode] = newk_size;

      index_k = newk_size-1;
      while (ppt->k[index_mode][index_k] > k_max_cl[index_mode])
        index_k--;
      ppt->k_size_cl[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]);

      index_k = newk_size-1;
      while (ppt->k[index_mode][index_k] > k_max_cmb[index_mode])
        index_k--;
      ppt->k_size_cmb[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]);

      /** - --> The two MIN statements are here because in a normal run, the cl and cmb
          arrays contain a single k value larger than their respective k_max.
          We are mimicking this behavior. */
    }
  }

  /* For testing, can be useful to print the k list in a file:

     FILE * out=fopen("output/k","w");

     for (index_k=0; index_k < ppt->k_size[0]; index_k++) {
       fprintf(out,"%e %e\n",ppt->k[0][index_k],pba->K);
     }

     fclose(out);
  */

  /** - finally, find the global k_min and k_max for the ensemble of all modes (scalars, vectors, tensors) */

  ppt->k_min = _HUGE_;
  ppt->k_max = 0.;
  if (ppt->has_scalars == _TRUE_) {
    ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_scalars][0]); /* first value, inferred from perturbations structure */
    ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_scalars][ppt->k_size[ppt->index_md_scalars]-1]); /* last value, inferred from perturbations structure */
  }
  if (ppt->has_vectors == _TRUE_) {
    ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_vectors][0]); /* first value, inferred from perturbations structure */
    ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_vectors][ppt->k_size[ppt->index_md_vectors]-1]); /* last value, inferred from perturbations structure */
  }
  if (ppt->has_tensors == _TRUE_) {
    ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_tensors][0]); /* first value, inferred from perturbations structure */
    ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_tensors][ppt->k_size[ppt->index_md_tensors]-1]); /* last value, inferred from perturbations structure */
  }

  free(k_max_cmb);
  free(k_max_cl);

  return _SUCCESS_;
}
/**
* Initialize a perturb_workspace structure. All fields are allocated
* here, with the exception of the perturb_vector '-->pv' field, which
* is allocated separately in perturb_vector_init. We allocate one
* such perturb_workspace structure per thread and per mode
* (scalar/../tensor). Then, for each thread, all initial conditions
* and wavenumbers will use the same workspace.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
 * @param ppw        Input/Output: pointer to perturb_workspace structure whose fields are allocated or filled here
* @return the error status
*/
int perturb_workspace_init(
                           struct precision * ppr,
                           struct background * pba,
                           struct thermo * pth,
                           struct perturbs * ppt,
                           int index_md,
                           struct perturb_workspace * ppw
                           ) {

  /** Summary: */

  /** - define local variables */

  int index_mt=0;   /* running index over metric perturbations obeying constraint equations */
  int index_ap;     /* running index over approximation schemes */
  int l;

  /** - Compute maximum l_max for any multipole */;

  /* NOTE(review): max_l_max is only assigned here for scalar and tensor
     modes; for vector modes it is presumably set elsewhere — confirm
     before relying on it for _vectors_. */
  if (_scalars_) {
    ppw->max_l_max = MAX(ppr->l_max_g, ppr->l_max_pol_g);
    if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur);
    if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm);
    if (pba->has_dr == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_dr);
  }

  if (_tensors_) {
    ppw->max_l_max = MAX(ppr->l_max_g_ten, ppr->l_max_pol_g_ten);
    if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur);
    if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm);
  }

  /** - Allocate \f$ s_l\f$[ ] array for freestreaming of multipoles (see arXiv:1305.3261) and initialize
      to 1.0, which is the K=0 value. */
  class_alloc(ppw->s_l, sizeof(double)*(ppw->max_l_max+1),ppt->error_message);
  for (l=0; l<=ppw->max_l_max; l++){
    ppw->s_l[l] = 1.0;
  }

  /** - define indices of metric perturbations obeying constraint
      equations (this can be done once and for all, because the
      vector of metric perturbations is the same whatever the
      approximation scheme, unlike the vector of quantities to
      be integrated, which is allocated separately in
      perturb_vector_init) */

  /* class_define_index assigns the current value of index_mt to the
     field and increments index_mt, so the order of these calls fixes
     the layout of ppw->pvecmetric. */
  if (_scalars_) {

    /* newtonian gauge */

    if (ppt->gauge == newtonian) {
      class_define_index(ppw->index_mt_psi,_TRUE_,index_mt,1); /* psi */
      class_define_index(ppw->index_mt_phi_prime,_TRUE_,index_mt,1); /* phi' */
    }

    /* synchronous gauge (note that eta is counted in the vector of
       quantities to be integrated, while here we only consider
       quantities obeying to constraint equations) */

    if (ppt->gauge == synchronous) {
      class_define_index(ppw->index_mt_h_prime,_TRUE_,index_mt,1); /* h' */
      class_define_index(ppw->index_mt_h_prime_prime,_TRUE_,index_mt,1); /* h'' */
      class_define_index(ppw->index_mt_eta_prime,_TRUE_,index_mt,1); /* eta' */
      class_define_index(ppw->index_mt_alpha,_TRUE_,index_mt,1); /* alpha = (h' + 6 tau') / (2 k**2) */
      class_define_index(ppw->index_mt_alpha_prime,_TRUE_,index_mt,1); /* alpha' */
    }
  }

  if (_vectors_) {

    /* newtonian gauge */

    if (ppt->gauge == newtonian) {
      class_define_index(ppw->index_mt_V_prime,_TRUE_,index_mt,1);
    }

    if (ppt->gauge == synchronous) {
      class_define_index(ppw->index_mt_hv_prime_prime,_TRUE_,index_mt,1);
    }
  }

  if (_tensors_) {
    class_define_index(ppw->index_mt_gw_prime_prime,_TRUE_,index_mt,1);
  }

  /* total number of metric perturbations obeying constraint equations */
  ppw->mt_size = index_mt;

  /** - allocate some workspace in which we will store temporarily the
      values of background, thermodynamics, metric and source
      quantities at a given time */

  class_alloc(ppw->pvecback,pba->bg_size_normal*sizeof(double),ppt->error_message);
  class_alloc(ppw->pvecthermo,pth->th_size*sizeof(double),ppt->error_message);
  class_alloc(ppw->pvecmetric,ppw->mt_size*sizeof(double),ppt->error_message);

  /** - count number of approximations, initialize their indices, and allocate their flags */

  index_ap=0;

  /* tight-coupling and radiation-streaming approximations exist for all modes */
  class_define_index(ppw->index_ap_tca,_TRUE_,index_ap,1);
  class_define_index(ppw->index_ap_rsa,_TRUE_,index_ap,1);

  if (_scalars_) {
    /* ultra-relativistic fluid and ncdm fluid approximations: scalars only,
       and only when the corresponding species are present */
    class_define_index(ppw->index_ap_ufa,pba->has_ur,index_ap,1);
    class_define_index(ppw->index_ap_ncdmfa,pba->has_ncdm,index_ap,1);
  }

  ppw->ap_size=index_ap;

  if (ppw->ap_size > 0)
    class_alloc(ppw->approx,ppw->ap_size*sizeof(int),ppt->error_message);

  /** - For definiteness, initialize approximation flags to arbitrary
      values (correct values are overwritten in
      pertub_find_approximation_switches) */

  if (_scalars_) {
    ppw->approx[ppw->index_ap_tca]=(int)tca_on;
    ppw->approx[ppw->index_ap_rsa]=(int)rsa_off;
    if (pba->has_ur == _TRUE_) {
      ppw->approx[ppw->index_ap_ufa]=(int)ufa_off;
    }
    if (pba->has_ncdm == _TRUE_) {
      ppw->approx[ppw->index_ap_ncdmfa]=(int)ncdmfa_off;
    }
  }

  if (_tensors_) {
    ppw->approx[ppw->index_ap_tca]=(int)tca_on;
    ppw->approx[ppw->index_ap_rsa]=(int)rsa_off;
  }

  /** - allocate fields where some of the perturbations are stored */

  if (_scalars_) {

    if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {

      /* one entry per non-cold-dark-matter species */
      class_alloc(ppw->delta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
      class_alloc(ppw->theta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
      class_alloc(ppw->shear_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
    }
  }

  return _SUCCESS_;
}
/**
* Free the perturb_workspace structure (with the exception of the
* perturb_vector '-->pv' field, which is freed separately in
* perturb_vector_free).
*
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param ppw Input: pointer to perturb_workspace structure to be freed
* @return the error status
*/
int perturb_workspace_free (
                            struct perturbs * ppt,
                            int index_md,
                            struct perturb_workspace * ppw
                            ) {

  int ncdm_fields_were_allocated;

  /* Release the temporary value vectors and the free-streaming
     coefficient array allocated in perturb_workspace_init(). */
  free(ppw->pvecmetric);
  free(ppw->pvecthermo);
  free(ppw->pvecback);
  free(ppw->s_l);

  /* The approximation-flag array exists only when at least one
     approximation scheme was defined for this mode. */
  if (ppw->ap_size > 0)
    free(ppw->approx);

  if (_scalars_) {

    /* The per-species ncdm arrays were allocated only when transfer
       functions or the total matter source were requested. */
    ncdm_fields_were_allocated =
      ((ppt->has_density_transfers == _TRUE_) ||
       (ppt->has_velocity_transfers == _TRUE_) ||
       (ppt->has_source_delta_m == _TRUE_));

    if (ncdm_fields_were_allocated == _TRUE_) {
      free(ppw->shear_ncdm);
      free(ppw->theta_ncdm);
      free(ppw->delta_ncdm);
    }
  }

  /* Finally release the workspace structure itself. */
  free(ppw);

  return _SUCCESS_;
}
/**
* Solve the perturbation evolution for a given mode, initial
* condition and wavenumber, and compute the corresponding source
* functions.
*
* For a given mode, initial condition and wavenumber, this function
* finds the time ranges over which the perturbations can be described
* within a given approximation. For each such range, it initializes
* (or redistributes) perturbations using perturb_vector_init(), and
* integrates over time. Whenever a "source sampling time" is passed,
* the source terms are computed and stored in the source table using
* perturb_sources().
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input/Output: pointer to the perturbation structure (output source functions S(k,tau) written here)
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param index_ic Input: index of initial condition under consideration (ad, iso...)
* @param index_k Input: index of wavenumber
* @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
* @return the error status
*/
int perturb_solve(
                  struct precision * ppr,
                  struct background * pba,
                  struct thermo * pth,
                  struct perturbs * ppt,
                  int index_md,
                  int index_ic,
                  int index_k,
                  struct perturb_workspace * ppw
                  ) {

  /** Summary: integrate the perturbations of one (mode, initial
      condition, wavenumber) triplet from an automatically determined
      initial time up to today, switching the system of equations each
      time an approximation scheme turns on/off, and storing the source
      functions S(k,tau) in ppt->sources along the way. */

  /** - define local variables */

  /* contains all fixed parameters, indices and workspaces used by the perturb_derivs function */
  struct perturb_parameters_and_workspace ppaw;

  /* conformal time */
  double tau,tau_lower,tau_upper,tau_mid;

  /* multipole */
  int l;

  /* index running over time */
  int index_tau;

  /* number of values in the tau_sampling array that should be considered for a given mode */
  int tau_actual_size;

  /* running index over types (temperature, etc) */
  int index_type;

  /* Fourier mode */
  double k;

  /* number of time intervals where the approximation scheme is uniform */
  int interval_number;

  /* index running over such time intervals */
  int index_interval;

  /* number of time intervals where each particular approximation is uniform */
  int * interval_number_of;

  /* edge of intervals where approximation scheme is uniform: tau_ini, tau_switch_1, ..., tau_end */
  double * interval_limit;

  /* array of approximation scheme within each interval: interval_approx[index_interval][index_ap] */
  int ** interval_approx;

  /* index running over approximations */
  int index_ap;

  /* approximation scheme within previous interval: previous_approx[index_ap] */
  int * previous_approx;

  int n_ncdm,is_early_enough;

  /* function pointer to ODE evolver and names of possible evolvers */
  /* NOTE(review): old-style (non-prototype) declarations; the actual
     evolvers are defined in another translation unit */
  extern int evolver_rk();
  extern int evolver_ndf15();
  int (*generic_evolver)();

  /* Related to the perturbation output */
  int (*perhaps_print_variables)();
  int index_ikout;

  /** - initialize indices relevant for back/thermo tables search */
  ppw->last_index_back=0;
  ppw->last_index_thermo=0;
  ppw->inter_mode = pba->inter_normal;

  /** - get wavenumber value */
  k = ppt->k[index_md][index_k];

  class_test(k == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  /** - If non-zero curvature, update array of free-streaming coefficients ppw->s_l */
  if (pba->has_curvature == _TRUE_){
    for (l = 0; l<=ppw->max_l_max; l++){
      /* clipped at zero so the sqrt argument never goes negative for large l */
      ppw->s_l[l] = sqrt(MAX(1.0-pba->K*(l*l-1.0)/k/k,0.));
    }
  }

  /** - maximum value of tau for which sources are calculated for this wavenumber */

  /* by default, today */
  tau_actual_size = ppt->tau_size;

  /** - using bisection, compute minimum value of tau for which this
      wavenumber is integrated */

  /* will be at least the first time in the background table */
  tau_lower = pba->tau_table[0];

  class_call(background_at_tau(pba,
                               tau_lower,
                               pba->normal_info,
                               pba->inter_normal,
                               &(ppw->last_index_back),
                               ppw->pvecback),
             pba->error_message,
             ppt->error_message);

  class_call(thermodynamics_at_z(pba,
                                 pth,
                                 1./ppw->pvecback[pba->index_bg_a]-1.,
                                 pth->inter_normal,
                                 &(ppw->last_index_thermo),
                                 ppw->pvecback,
                                 ppw->pvecthermo),
             pth->error_message,
             ppt->error_message);

  /* check that this initial time is indeed OK given imposed
     conditions on kappa' and on k/aH */

  class_test(ppw->pvecback[pba->index_bg_a]*
             ppw->pvecback[pba->index_bg_H]/
             ppw->pvecthermo[pth->index_th_dkappa] >
             ppr->start_small_k_at_tau_c_over_tau_h, ppt->error_message, "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. You should increase 'start_small_k_at_tau_c_over_tau_h' up to at least %g, or decrease 'a_ini_over_a_today_default'\n",
             ppw->pvecback[pba->index_bg_a]*
             ppw->pvecback[pba->index_bg_H]/
             ppw->pvecthermo[pth->index_th_dkappa]);

  class_test(k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] >
             ppr->start_large_k_at_tau_h_over_tau_k,
             ppt->error_message,
             "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. You should increase 'start_large_k_at_tau_h_over_tau_k' up to at least %g, or decrease 'a_ini_over_a_today_default'\n",
             ppt->k[index_md][ppt->k_size[index_md]-1]/ppw->pvecback[pba->index_bg_a]/ ppw->pvecback[pba->index_bg_H]);

  /* non-cold relics must still be ultra-relativistic (w close to 1/3)
     at the initial time, otherwise the analytic initial conditions are
     invalid */
  if (pba->has_ncdm == _TRUE_) {

    for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {

      class_test(fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.)>ppr->tol_ncdm_initial_w,
                 ppt->error_message,
                 "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time at which the ncdm species number %d is not ultra-relativistic anymore, with w=%g, p=%g and rho=%g\n",
                 n_ncdm,
                 ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm],
                 ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm],
                 ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]);
    }
  }

  /* is at most the time at which sources must be sampled */
  tau_upper = ppt->tau_sampling[0];

  /* start bisection: find the latest time that is still "early enough"
     for all three criteria (ncdm relativistic, tau_c/tau_H small,
     tau_H/tau_k small) */
  tau_mid = 0.5*(tau_lower + tau_upper);

  while ((tau_upper - tau_lower)/tau_lower > ppr->tol_tau_approx) {

    is_early_enough = _TRUE_;

    class_call(background_at_tau(pba,
                                 tau_mid,
                                 pba->normal_info,
                                 pba->inter_normal,
                                 &(ppw->last_index_back),
                                 ppw->pvecback),
               pba->error_message,
               ppt->error_message);

    /* if there are non-cold relics, check that they are relativistic enough */
    if (pba->has_ncdm == _TRUE_) {
      for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
        if (fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.) > ppr->tol_ncdm_initial_w)
          is_early_enough = _FALSE_;
      }
    }

    /* also check that the two conditions on (aH/kappa') and (aH/k) are fulfilled */
    /* (the thermodynamics call is skipped when the ncdm test already
       failed, since the answer is already known) */
    if (is_early_enough == _TRUE_) {

      class_call(thermodynamics_at_z(pba,
                                     pth,
                                     1./ppw->pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                     pth->inter_normal,
                                     &(ppw->last_index_thermo),
                                     ppw->pvecback,
                                     ppw->pvecthermo),
                 pth->error_message,
                 ppt->error_message);

      if ((ppw->pvecback[pba->index_bg_a]*
           ppw->pvecback[pba->index_bg_H]/
           ppw->pvecthermo[pth->index_th_dkappa] >
           ppr->start_small_k_at_tau_c_over_tau_h) ||
          (k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] >
           ppr->start_large_k_at_tau_h_over_tau_k))
        is_early_enough = _FALSE_;
    }

    if (is_early_enough == _TRUE_)
      tau_lower = tau_mid;
    else
      tau_upper = tau_mid;

    tau_mid = 0.5*(tau_lower + tau_upper);
  }

  /* integration starts at the bisection result */
  tau = tau_mid;

  /** - find the number of intervals over which approximation scheme is constant */

  class_alloc(interval_number_of,ppw->ap_size*sizeof(int),ppt->error_message);

  ppw->inter_mode = pba->inter_normal;

  class_call(perturb_find_approximation_number(ppr,
                                               pba,
                                               pth,
                                               ppt,
                                               index_md,
                                               k,
                                               ppw,
                                               tau,
                                               ppt->tau_sampling[tau_actual_size-1],
                                               &interval_number,
                                               interval_number_of),
             ppt->error_message,
             ppt->error_message);

  class_alloc(interval_limit,(interval_number+1)*sizeof(double),ppt->error_message);

  class_alloc(interval_approx,interval_number*sizeof(int*),ppt->error_message);

  for (index_interval=0; index_interval<interval_number; index_interval++)
    class_alloc(interval_approx[index_interval],ppw->ap_size*sizeof(int),ppt->error_message);

  class_call(perturb_find_approximation_switches(ppr,
                                                 pba,
                                                 pth,
                                                 ppt,
                                                 index_md,
                                                 k,
                                                 ppw,
                                                 tau,
                                                 ppt->tau_sampling[tau_actual_size-1],
                                                 ppr->tol_tau_approx,
                                                 interval_number,
                                                 interval_number_of,
                                                 interval_limit,
                                                 interval_approx),
             ppt->error_message,
             ppt->error_message);

  free(interval_number_of);

  /** - fill the structure containing all fixed parameters, indices
      and workspaces needed by perturb_derivs */

  ppaw.ppr = ppr;
  ppaw.pba = pba;
  ppaw.pth = pth;
  ppaw.ppt = ppt;
  ppaw.index_md = index_md;
  ppaw.index_ic = index_ic;
  ppaw.index_k = index_k;
  ppaw.k = k;
  ppaw.ppw = ppw;
  /* the evolver takes small steps, so switch the interpolation mode of
     the background/thermo tables to the faster "closeby" mode */
  ppaw.ppw->inter_mode = pba->inter_closeby;
  ppaw.ppw->last_index_back = 0;
  ppaw.ppw->last_index_thermo = 0;

  /** - check whether we need to print perturbations to a file for this wavenumber */

  perhaps_print_variables = NULL;
  ppw->index_ikout = -1;
  for (index_ikout=0; index_ikout<ppt->k_output_values_num; index_ikout++){
    if (ppt->index_k_output_values[index_md*ppt->k_output_values_num+index_ikout] == index_k){
      ppw->index_ikout = index_ikout;
      perhaps_print_variables = perturb_print_variables;
      /* class_call(perturb_prepare_output_file(
         pba,ppt,ppw,index_ikout,index_md),
         ppt->error_message,
         ppt->error_message);
      */
    }
  }

  /** - loop over intervals over which approximation scheme is uniform. For each interval: */

  for (index_interval=0; index_interval<interval_number; index_interval++) {

    /** - --> (a) fix the approximation scheme */

    for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
      ppw->approx[index_ap]=interval_approx[index_interval][index_ap];

    /** - --> (b) get the previous approximation scheme. If the current
        interval starts from the initial time tau_ini, the previous
        approximation is set to be a NULL pointer, so that the
        function perturb_vector_init() knows that perturbations must
        be initialized */

    if (index_interval==0) {
      previous_approx=NULL;
    }
    else {
      previous_approx=interval_approx[index_interval-1];
    }

    /** - --> (c) define the vector of perturbations to be integrated
        over. If the current interval starts from the initial time
        tau_ini, fill the vector with initial conditions for each
        mode. If it starts from an approximation switching point,
        redistribute correctly the perturbations from the previous to
        the new vector of perturbations. */

    class_call(perturb_vector_init(ppr,
                                   pba,
                                   pth,
                                   ppt,
                                   index_md,
                                   index_ic,
                                   k,
                                   interval_limit[index_interval],
                                   ppw,
                                   previous_approx),
               ppt->error_message,
               ppt->error_message);

    /** - --> (d) integrate the perturbations over the current interval. */

    if(ppr->evolver == rk){
      generic_evolver = evolver_rk;
    }
    else{
      generic_evolver = evolver_ndf15;
    }

    class_call(generic_evolver(perturb_derivs,
                               interval_limit[index_interval],
                               interval_limit[index_interval+1],
                               ppw->pv->y,
                               ppw->pv->used_in_sources,
                               ppw->pv->pt_size,
                               &ppaw,
                               ppr->tol_perturb_integration,
                               ppr->smallest_allowed_variation,
                               perturb_timescale,
                               ppr->perturb_integration_stepsize,
                               ppt->tau_sampling,
                               tau_actual_size,
                               perturb_sources,
                               perhaps_print_variables,
                               ppt->error_message),
               ppt->error_message,
               ppt->error_message);

  }

  /** - if perturbations were printed in a file, close the file */

  //if (perhaps_print_variables != NULL)
  //  fclose(ppw->perturb_output_file);

  /** - fill the source terms array with zeros for all times between
      the last integrated time tau_max and tau_today. */
  /* NOTE(review): with tau_actual_size set equal to ppt->tau_size
     above and never reduced in this routine, this loop body does not
     execute; presumably kept for generality in case tau_actual_size
     is ever made mode-dependent — confirm before removing */

  for (index_tau = tau_actual_size; index_tau < ppt->tau_size; index_tau++) {
    for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) {
      ppt->sources[index_md]
        [index_ic * ppt->tp_size[index_md] + index_type]
        [index_tau * ppt->k_size[index_md] + index_k] = 0.;
    }
  }

  /** - free quantities allocated at the beginning of the routine */

  class_call(perturb_vector_free(ppw->pv),
             ppt->error_message,
             ppt->error_message);

  for (index_interval=0; index_interval<interval_number; index_interval++)
    free(interval_approx[index_interval]);

  free(interval_approx);

  free(interval_limit);

  return _SUCCESS_;

}
/**
 * Build the column-title strings describing the perturbation
 * quantities that will be printed/stored when 'k_output_values' is
 * set, for scalar and tensor modes (vector titles are only reset to
 * empty here).
 *
 * Improvement over the previous version: all unbounded sprintf()
 * calls into the fixed-size buffer 'tmp' have been replaced with
 * bounded snprintf(tmp, sizeof(tmp), ...), so a pathological ncdm
 * species/momentum count can never overflow the buffer (CERT STR31-C).
 * Output is byte-identical for all in-range inputs.
 *
 * @param pba Input: pointer to background structure
 * @param ppt Input/Output: pointer to the perturbation structure (title strings written here)
 * @param ppr Input: pointer to precision structure (l_max_ncdm used for CONCEPT output)
 * @return the error status
 */
int perturb_prepare_output(struct background * pba,
                           struct perturbs * ppt,
                           struct precision * ppr){

  int n_ncdm;

  /************************/
  /* For use with CONCEPT */
  /************************/
  /* scratch buffer for per-species column names */
  char tmp[1024];
  int index_q;
  int index_l;
  /**************************/
  /* ^For use with CONCEPT^ */
  /**************************/

  ppt->scalar_titles[0]='\0';
  ppt->vector_titles[0]='\0';
  ppt->tensor_titles[0]='\0';

  if (ppt->k_output_values_num > 0) {

    /** Write titles for all perturbations that we would like to print/store. */
    if (ppt->has_scalars == _TRUE_){

      class_store_columntitle(ppt->scalar_titles,"tau [Mpc]",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"a",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"delta_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"theta_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"shear_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"pol0_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"pol1_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"pol2_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"delta_b",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"theta_b",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"psi",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"phi",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"phi_prime",_TRUE_); //CGT

      /* Perturbed recombination */
      class_store_columntitle(ppt->scalar_titles,"delta_Tb",ppt->has_perturbed_recombination);
      class_store_columntitle(ppt->scalar_titles,"delta_chi",ppt->has_perturbed_recombination);

      /* Ultrarelativistic species */
      class_store_columntitle(ppt->scalar_titles,"delta_ur",pba->has_ur);
      class_store_columntitle(ppt->scalar_titles,"theta_ur",pba->has_ur);
      class_store_columntitle(ppt->scalar_titles,"shear_ur",pba->has_ur);

      /* Cold dark matter */
      class_store_columntitle(ppt->scalar_titles,"delta_cdm",pba->has_cdm);
      class_store_columntitle(ppt->scalar_titles,"theta_cdm",pba->has_cdm);

      /* Non-cold dark matter */
      if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          snprintf(tmp,sizeof(tmp),"delta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof(tmp),"theta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof(tmp),"shear_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof(tmp),"cs2_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          /************************/
          /* For use with CONCEPT */
          /************************/
          /* Include ncdm Theta_n_q_l_ncdm[n,q,l] in perturbation output */
          snprintf(tmp,sizeof(tmp),"M_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
            snprintf(tmp,sizeof(tmp),"dlnf0_dlnq_ncdm[%d,%d]",n_ncdm,index_q);
            class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          }
          for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
            snprintf(tmp,sizeof(tmp),"q_ncdm[%d,%d]",n_ncdm,index_q);
            class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          }
          for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
            for (index_l=0; index_l<=ppr->l_max_ncdm; index_l++) {
              /* snprintf(tmp,sizeof(tmp),"Theta_ncdm[%d](%.16f,%d)",n_ncdm,pba->q_ncdm[n_ncdm][index_q],index_l); */
              snprintf(tmp,sizeof(tmp),"Theta_n_q_l_ncdm[%d,%d,%d]",n_ncdm,index_q,index_l);
              class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
            }
          }
          /**************************/
          /* ^For use with CONCEPT^ */
          /**************************/
        }
      }

      /* Decaying cold dark matter */
      class_store_columntitle(ppt->scalar_titles, "delta_dcdm", pba->has_dcdm);
      class_store_columntitle(ppt->scalar_titles, "theta_dcdm", pba->has_dcdm);

      /* Decay radiation */
      class_store_columntitle(ppt->scalar_titles, "delta_dr", pba->has_dr);
      class_store_columntitle(ppt->scalar_titles, "theta_dr", pba->has_dr);
      class_store_columntitle(ppt->scalar_titles, "shear_dr", pba->has_dr);

      /* Scalar field scf */
      class_store_columntitle(ppt->scalar_titles, "delta_scf", pba->has_scf);
      class_store_columntitle(ppt->scalar_titles, "theta_scf", pba->has_scf);

      /************************/
      /* For use with CONCEPT */
      /************************/
      /* Include fld in perturbation output */
      class_store_columntitle(ppt->scalar_titles, "delta_fld", pba->has_fld);
      class_store_columntitle(ppt->scalar_titles, "theta_fld", pba->has_fld);
      /**
       * We choose to store cs2_fld = delta_p_fld/delta_rho_fld rather than
       * simply delta_p_fld itself, as is done for massive neutrinos.
       */
      class_store_columntitle(ppt->scalar_titles, "cs2_fld", pba->has_fld);
      /**************************/
      /* ^For use with CONCEPT^ */
      /**************************/

      /************************/
      /* For use with CONCEPT */
      /************************/
      /* Include theta_tot in perturbation output */
      class_store_columntitle(ppt->scalar_titles, "theta_tot", _TRUE_);
      /**************************/
      /* ^For use with CONCEPT^ */
      /**************************/

      /************************/
      /* For use with CONCEPT */
      /************************/
      /* Include h_prime in perturbation output */
      class_store_columntitle(ppt->scalar_titles, "h_prime", ppt->gauge == synchronous);
      /**************************/
      /* ^For use with CONCEPT^ */
      /**************************/

      /************************/
      /* For use with CONCEPT */
      /************************/
      /* Include H_T_prime (in N-body gauge) in perturbation output */
      class_store_columntitle(ppt->scalar_titles, "H_T_prime", _TRUE_);
      /**************************/
      /* ^For use with CONCEPT^ */
      /**************************/

      ppt->number_of_scalar_titles =
        get_number_of_titles(ppt->scalar_titles);
    }

    if (ppt->has_tensors == _TRUE_){

      class_store_columntitle(ppt->tensor_titles,"tau [Mpc]",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"a",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"delta_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"shear_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"l4_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"pol0_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"pol2_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"pol4_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"H (gw)",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"Hdot (gwdot)",_TRUE_);

      class_store_columntitle(ppt->tensor_titles,"delta_ur",ppt->evolve_tensor_ur);
      class_store_columntitle(ppt->tensor_titles,"shear_ur",ppt->evolve_tensor_ur);
      class_store_columntitle(ppt->tensor_titles,"l4_ur",ppt->evolve_tensor_ur);

      if (ppt->evolve_tensor_ncdm == _TRUE_) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          snprintf(tmp,sizeof(tmp),"delta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof(tmp),"theta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof(tmp),"shear_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
        }
      }

      ppt->number_of_tensor_titles =
        get_number_of_titles(ppt->tensor_titles);
    }
  }

  return _SUCCESS_;
}
/**
* For a given mode and wavenumber, find the number of intervals of
* time between tau_ini and tau_end such that the approximation
* scheme (and the number of perturbation equations) is uniform.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
 * @param k                  Input: wavenumber (value, not an index)
* @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
* @param tau_ini Input: initial time of the perturbation integration
* @param tau_end Input: final time of the perturbation integration
* @param interval_number Output: total number of intervals
* @param interval_number_of Output: number of intervals with respect to each particular approximation
* @return the error status
*/
int perturb_find_approximation_number(
                                      struct precision * ppr,
                                      struct background * pba,
                                      struct thermo * pth,
                                      struct perturbs * ppt,
                                      int index_md,
                                      double k,
                                      struct perturb_workspace * ppw,
                                      double tau_ini,
                                      double tau_end,
                                      int * interval_number,
                                      int * interval_number_of /* interval_number_of[index_ap] (already allocated) */
                                      ){

  /** Summary: count the approximation switches between tau_ini and
      tau_end for this wavenumber.

      Improvement over the previous version: perturb_approximations()
      fills ppw->approx[] for EVERY approximation scheme in a single
      call, yet it used to be invoked twice per scheme inside the loop
      below (2*ap_size evaluations, each involving background/thermo
      table lookups). The two calls are now hoisted out of the loop —
      one at tau_end (flags cached temporarily in the caller-allocated
      interval_number_of[] array, which is overwritten with its real
      content afterwards) and one at tau_ini. Results are identical. */

  /* index running over approximations */
  int index_ap;

  /* value of a given approximation at tau_ini and tau_end */
  int flag_ini,flag_end;

  /** - fix default number of intervals to one (if no approximation switch) */

  *interval_number=1;

  /** - evaluate all approximation flags once at tau_end and cache them */

  class_call(perturb_approximations(ppr,
                                    pba,
                                    pth,
                                    ppt,
                                    index_md,
                                    k,
                                    tau_end,
                                    ppw),
             ppt->error_message,
             ppt->error_message);

  for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
    interval_number_of[index_ap] = ppw->approx[index_ap];

  /** - evaluate all approximation flags once at tau_ini */

  class_call(perturb_approximations(ppr,
                                    pba,
                                    pth,
                                    ppt,
                                    index_md,
                                    k,
                                    tau_ini,
                                    ppw),
             ppt->error_message,
             ppt->error_message);

  /** - for each approximation, add its number of switching times */

  for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {

    flag_ini = ppw->approx[index_ap];
    flag_end = interval_number_of[index_ap];

    /* approximation labels must be declared in chronological order,
       so the flag can only grow with time */
    class_test(flag_end<flag_ini,
               ppt->error_message,
               "For each approximation scheme, the declaration of approximation labels in the enumeration must follow chronological order, e.g: enum approx_flags {flag1, flag2, flag3} with flag1 being the initial one and flag3 the final one");

    *interval_number += flag_end-flag_ini;

    interval_number_of[index_ap] = flag_end-flag_ini+1;
  }

  return _SUCCESS_;

}
/**
* For a given mode and wavenumber, find the values of time at which
* the approximation changes.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
 * @param k                  Input: wavenumber (value, not an index)
* @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
* @param tau_ini Input: initial time of the perturbation integration
* @param tau_end Input: final time of the perturbation integration
* @param precision Input: tolerance on output values
* @param interval_number Input: total number of intervals
* @param interval_number_of Input: number of intervals with respect to each particular approximation
* @param interval_limit Output: value of time at the boundary of the intervals: tau_ini, tau_switch1, ..., tau_end
* @param interval_approx Output: value of approximations in each interval
* @return the error status
*/
int perturb_find_approximation_switches(
                                        struct precision * ppr,
                                        struct background * pba,
                                        struct thermo * pth,
                                        struct perturbs * ppt,
                                        int index_md,
                                        double k,
                                        struct perturb_workspace * ppw,
                                        double tau_ini,
                                        double tau_end,
                                        double precision,
                                        int interval_number,
                                        int * interval_number_of,
                                        double * interval_limit, /* interval_limit[index_interval] (already allocated) */
                                        int ** interval_approx /* interval_approx[index_interval][index_ap] (already allocated) */
                                        ){

  /** Summary: locate each approximation switching time by bisection,
      sort the switches chronologically into interval_limit[], and
      record the approximation flags holding inside each interval into
      interval_approx[][], with consistency checks (no backward switch,
      no two switches at the same time). */

  /** - define local variables */

  int index_ap;
  int index_switch;
  int index_switch_tot;
  int num_switch;
  double tau_min,lower_bound,upper_bound;
  double mid=0;
  double * unsorted_tau_switch;
  double next_tau_switch;
  int flag_ini;
  int num_switching_at_given_time;

  /** - write in output arrays the initial time and approximation */

  interval_limit[0]=tau_ini;

  class_call(perturb_approximations(ppr,
                                    pba,
                                    pth,
                                    ppt,
                                    index_md,
                                    k,
                                    tau_ini,
                                    ppw),
             ppt->error_message,
             ppt->error_message);

  for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
    interval_approx[0][index_ap]=ppw->approx[index_ap];

  /** - if there are no approximation switches, just write final time and return */

  if (interval_number == 1) {

    interval_limit[1]=tau_end;

  }

  /** - if there are switches, consider approximations one after each
      other. Find switching time by bisection. Store all switches in
      arbitrary order in array unsorted_tau_switch[ ] */

  else {

    class_alloc(unsorted_tau_switch,(interval_number-1)*sizeof(double),ppt->error_message);

    index_switch_tot=0;

    for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {

      if (interval_number_of[index_ap] > 1) {

        num_switch = interval_number_of[index_ap]-1;

        /* each successive switch of this approximation must lie after
           the previous one: tau_min advances with each switch found */
        tau_min = tau_ini;

        flag_ini = interval_approx[0][index_ap];

        for (index_switch=0; index_switch<num_switch; index_switch++) {

          lower_bound=tau_min;
          upper_bound=tau_end;
          mid = 0.5*(lower_bound+upper_bound);

          /* bisect on the time at which the flag passes the value
             flag_ini+index_switch (flags are monotonic in time) */
          while (upper_bound - lower_bound > precision) {

            class_call(perturb_approximations(ppr,
                                              pba,
                                              pth,
                                              ppt,
                                              index_md,
                                              k,
                                              mid,
                                              ppw),
                       ppt->error_message,
                       ppt->error_message);

            if (ppw->approx[index_ap] > flag_ini+index_switch) {
              upper_bound=mid;
            }
            else {
              lower_bound=mid;
            }

            mid = 0.5*(lower_bound+upper_bound);

          }

          unsorted_tau_switch[index_switch_tot]=mid;
          index_switch_tot++;

          tau_min=mid;

        }
      }
    }

    class_test(index_switch_tot != (interval_number-1),
               ppt->error_message,
               "bug in approximation switch search routine: should have %d = %d",
               index_switch_tot,interval_number-1);

    /** - now sort interval limits in correct order */
    /* selection sort: repeatedly pick the smallest unsorted switch
       strictly greater than the last one written (ties would stall
       the loop and trip the class_test below) */

    index_switch_tot=1;

    while (index_switch_tot < interval_number) {

      next_tau_switch=tau_end;
      for (index_switch=0; index_switch<interval_number-1; index_switch++) {

        if ((unsorted_tau_switch[index_switch] > interval_limit[index_switch_tot-1]) &&
            (unsorted_tau_switch[index_switch] < next_tau_switch)) {
          next_tau_switch=unsorted_tau_switch[index_switch];
        }
      }
      interval_limit[index_switch_tot]=next_tau_switch;
      index_switch_tot++;
    }

    interval_limit[index_switch_tot]=tau_end;

    class_test(index_switch_tot != interval_number,
               ppt->error_message,
               "most probably two approximation switching time were found to be equal, which cannot be handled\n");

    /** - store each approximation in chronological order */
    /* evaluated at the midpoint of each interval, where the scheme is
       guaranteed uniform */

    for (index_switch=1; index_switch<interval_number; index_switch++) {

      class_call(perturb_approximations(ppr,
                                        pba,
                                        pth,
                                        ppt,
                                        index_md,
                                        k,
                                        0.5*(interval_limit[index_switch]+interval_limit[index_switch+1]),
                                        ppw),
                 ppt->error_message,
                 ppt->error_message);

      for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
        interval_approx[index_switch][index_ap]=ppw->approx[index_ap];

        /* check here that approximation does not go backward (remember
           that by definition the value of an approximation can only
           increase) */

        class_test(interval_approx[index_switch][index_ap] < interval_approx[index_switch-1][index_ap],
                   ppt->error_message,
                   "The approximation with label %d is not defined correctly: it goes backward (from %d to %d) for k=%e and between tau=%e and %e; this cannot be handled\n",
                   index_ap,
                   interval_approx[index_switch-1][index_ap],
                   interval_approx[index_switch][index_ap],
                   k,
                   0.5*(interval_limit[index_switch-1]+interval_limit[index_switch]),
                   0.5*(interval_limit[index_switch]+interval_limit[index_switch+1])
                   );
      }

      /* check here that more than one approximation is not switched on at a given time */

      num_switching_at_given_time=0;
      for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
        if (interval_approx[index_switch][index_ap] != interval_approx[index_switch-1][index_ap])
          num_switching_at_given_time++;
      }
      class_test(num_switching_at_given_time != 1,
                 ppt->error_message,
                 "for k=%e, at tau=%g, you switch %d approximations at the same time, this cannot be handled. Usually happens in two cases: triggers for different approximations coincide, or one approx is reversible\n",
                 k,
                 interval_limit[index_switch],
                 num_switching_at_given_time);

      /* verbose reporting of the switches found (scalars/tensors) */

      if (ppt->perturbations_verbose>2) {

        if (_scalars_) {

          if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) &&
              (interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off))
            fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]);
          //fprintf(stderr,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]); //TBC

          if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) &&
              (interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on))
            fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation at tau=%e\n",k,interval_limit[index_switch]);

          if (pba->has_ur == _TRUE_) {
            if ((interval_approx[index_switch-1][ppw->index_ap_ufa]==(int)ufa_off) &&
                (interval_approx[index_switch][ppw->index_ap_ufa]==(int)ufa_on)) {
              fprintf(stdout,"Mode k=%e: will switch on ur fluid approximation at tau=%e\n",k,interval_limit[index_switch]);
            }
          }
          if (pba->has_ncdm == _TRUE_) {
            if ((interval_approx[index_switch-1][ppw->index_ap_ncdmfa]==(int)ncdmfa_off) &&
                (interval_approx[index_switch][ppw->index_ap_ncdmfa]==(int)ncdmfa_on)) {
              fprintf(stdout,"Mode k=%e: will switch on ncdm fluid approximation at tau=%e\n",k,interval_limit[index_switch]);
            }
          }
        }

        if (_tensors_) {

          if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) &&
              (interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off))
            fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation for tensors at tau=%e\n",k,interval_limit[index_switch]);

          if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) &&
              (interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on))
            fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation for tensors at tau=%e\n",k,interval_limit[index_switch]);
        }
      }
    }

    free(unsorted_tau_switch);

    /* leave ppw->approx[] evaluated at tau_end for the caller */

    class_call(perturb_approximations(ppr,
                                      pba,
                                      pth,
                                      ppt,
                                      index_md,
                                      k,
                                      tau_end,
                                      ppw),
               ppt->error_message,
               ppt->error_message);

  }

  return _SUCCESS_;

}
/**
* Initialize the field '-->pv' of a perturb_workspace structure, which
* is a perturb_vector structure. This structure contains indices and
* values of all quantities which need to be integrated with respect
* to time (and only them: quantities fixed analytically or obeying
* constraint equations are NOT included in this vector). This routine
* distinguishes between two cases:
*
* --> the input pa_old is set to the NULL pointer:
*
* This happens when we start integrating over a new wavenumber and we
* want to set initial conditions for the perturbations. Then, it is
* assumed that ppw-->pv is not yet allocated. This routine allocates
* it, defines all indices, and then fills the vector ppw-->pv-->y with
* the initial conditions defined in perturb_initial_conditions.
*
* --> the input pa_old is not set to the NULL pointer and describes
* some set of approximations:
*
* This happens when we need to change approximation scheme while
* integrating over a given wavenumber. The new approximation
* described by ppw-->pa is then different from pa_old. Then, this
* routine allocates a new vector with a new size and new index
* values; it fills this vector with initial conditions taken from the
* previous vector passed as an input in ppw-->pv, and eventually with
* some analytic approximations for the new variables appearing at
* this time; then the new vector comes in replacement of the old one,
* which is freed.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param index_ic Input: index of initial condition under consideration (ad, iso...)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param ppw Input/Output: workspace containing in input the approximation scheme, the background/thermodynamics/metric quantities, and eventually the previous vector y; and in output the new vector y.
 * @param pa_old   Input: NULL if we need to set y to initial conditions for a new wavenumber; points towards a perturb_approximations if we want to switch approximation scheme.
* @return the error status
*/
int perturb_vector_init(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
int index_ic,
double k,
double tau,
struct perturb_workspace * ppw, /* ppw->pv unallocated if pa_old = NULL, allocated and filled otherwise */
int * pa_old
) {
/** Summary: */
/** - define local variables */
struct perturb_vector * ppv;
int index_pt;
int l;
int n_ncdm,index_q,ncdm_l_size;
double rho_plus_p_ncdm,q,q2,epsilon,a,factor;
/** - allocate a new perturb_vector structure to which ppw-->pv will point at the end of the routine */
class_alloc(ppv,sizeof(struct perturb_vector),ppt->error_message);
/** - initialize pointers to NULL (they will be allocated later if
needed), relevant for perturb_vector_free() */
ppv->l_max_ncdm = NULL;
ppv->q_size_ncdm = NULL;
/** - define all indices in this new vector (depends on approximation scheme, described by the input structure ppw-->pa) */
index_pt = 0;
if (_scalars_) {
/* reject inconsistent values of the number of mutipoles in photon temperature hierarchy */
class_test(ppr->l_max_g < 4,
ppt->error_message,
"ppr->l_max_g should be at least 4, i.e. we must integrate at least over photon density, velocity, shear, third and fourth momentum");
/* reject inconsistent values of the number of mutipoles in photon polarization hierarchy */
class_test(ppr->l_max_pol_g < 4,
ppt->error_message,
"ppr->l_max_pol_g should be at least 4");
/* reject inconsistent values of the number of mutipoles in decay radiation hierarchy */
if (pba->has_dr == _TRUE_) {
class_test(ppr->l_max_dr < 4,
ppt->error_message,
"ppr->l_max_dr should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum");
}
/* reject inconsistent values of the number of mutipoles in ultra relativistic neutrino hierarchy */
if (pba->has_ur == _TRUE_) {
class_test(ppr->l_max_ur < 4,
ppt->error_message,
"ppr->l_max_ur should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum");
}
/* photons */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
/* temperature */
ppv->l_max_g = ppr->l_max_g;
class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */
class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */
class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* higher momenta */
/* polarization */
ppv->l_max_pol_g = ppr->l_max_pol_g;
class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2);
}
}
/* baryons */
class_define_index(ppv->index_pt_delta_b,_TRUE_,index_pt,1); /* baryon density */
class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1); /* baryon velocity */
/* cdm */
class_define_index(ppv->index_pt_delta_cdm,pba->has_cdm,index_pt,1); /* cdm density */
class_define_index(ppv->index_pt_theta_cdm,pba->has_cdm && (ppt->gauge == newtonian),index_pt,1); /* cdm velocity */
/* dcdm */
class_define_index(ppv->index_pt_delta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm density */
class_define_index(ppv->index_pt_theta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm velocity */
/* ultra relativistic decay radiation */
if (pba->has_dr==_TRUE_){
ppv->l_max_dr = ppr->l_max_dr;
class_define_index(ppv->index_pt_F0_dr,_TRUE_,index_pt,ppv->l_max_dr+1); /* all momenta in Boltzmann hierarchy */
}
/* fluid */
if (pba->use_ppf == _FALSE_) {
class_define_index(ppv->index_pt_delta_fld,pba->has_fld,index_pt,1); /* fluid density */
class_define_index(ppv->index_pt_theta_fld,pba->has_fld,index_pt,1); /* fluid velocity */
}
else {
class_define_index(ppv->index_pt_Gamma_fld,pba->has_fld,index_pt,1); /* Gamma variable of PPF scheme */
}
/* scalar field */
class_define_index(ppv->index_pt_phi_scf,pba->has_scf,index_pt,1); /* scalar field density */
class_define_index(ppv->index_pt_phi_prime_scf,pba->has_scf,index_pt,1); /* scalar field velocity */
/* perturbed recombination: the indices are defined once tca is off. */
if ( (ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
class_define_index(ppv->index_pt_perturbed_recombination_delta_temp,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_perturbed_recombination_delta_chi,_TRUE_,index_pt,1);
}
/* ultra relativistic neutrinos */
if (pba->has_ur && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
class_define_index(ppv->index_pt_delta_ur,_TRUE_,index_pt,1); /* density of ultra-relativistic neutrinos/relics */
class_define_index(ppv->index_pt_theta_ur,_TRUE_,index_pt,1); /* velocity of ultra-relativistic neutrinos/relics */
class_define_index(ppv->index_pt_shear_ur,_TRUE_,index_pt,1); /* shear of ultra-relativistic neutrinos/relics */
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->l_max_ur = ppr->l_max_ur;
class_define_index(ppv->index_pt_l3_ur,_TRUE_,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */
}
}
/* non-cold dark matter */
if (pba->has_ncdm == _TRUE_) {
ppv->index_pt_psi0_ncdm1 = index_pt; /* density of ultra-relativistic neutrinos/relics */
ppv->N_ncdm = pba->N_ncdm;
class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
// Set value of ppv->l_max_ncdm:
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_off){
/* reject inconsistent values of the number of multipoles in ultra relativistic neutrino hierarchy */
class_test(ppr->l_max_ncdm < 4,
ppt->error_message,
"ppr->l_max_ncdm=%d should be at least 4, i.e. we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm);
//Copy value from precision parameter:
ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm;
ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm];
}
else{
// In the fluid approximation, hierarchy is cut at lmax = 2 and q dependence is integrated out:
ppv->l_max_ncdm[n_ncdm] = 2;
ppv->q_size_ncdm[n_ncdm] = 1;
}
index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm];
}
}
/* metric (only quantities to be integrated, not those obeying constraint equations) */
/* metric perturbation eta of synchronous gauge */
class_define_index(ppv->index_pt_eta,ppt->gauge == synchronous,index_pt,1);
/* metric perturbation phi of newtonian gauge ( we could fix it
using Einstein equations as a constraint equation for phi, but
integration is numerically more stable if we actually evolve
phi) */
class_define_index(ppv->index_pt_phi,ppt->gauge == newtonian,index_pt,1);
}
if (_vectors_) {
/* Vector baryon velocity: v_b^{(1)}. */
class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1);
/* eventually reject inconsistent values of the number of multipoles in photon temperature hierarchy and polarization*/
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppv->l_max_g = ppr->l_max_g_ten;
class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */
class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */
class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */
class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */
ppv->l_max_pol_g = ppr->l_max_pol_g_ten;
class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */
class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */
class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */
class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */
}
}
/** - (a) metric perturbations V or \f$ h_v \f$ depending on gauge */
if (ppt->gauge == synchronous){
class_define_index(ppv->index_pt_hv_prime,_TRUE_,index_pt,1);
}
if (ppt->gauge == newtonian){
class_define_index(ppv->index_pt_V,_TRUE_,index_pt,1);
}
}
if (_tensors_) {
/* reject inconsistent values of the number of multipoles in photon temperature hierarchy */
class_test(ppr->l_max_g_ten < 4,
ppt->error_message,
"ppr->l_max_g_ten should be at least 4, i.e. we must integrate at least over photon density, velocity, shear, third momentum");
/* reject inconsistent values of the number of multipoles in photon polarization hierarchy */
class_test(ppr->l_max_pol_g_ten < 4,
ppt->error_message,
"ppr->l_max_pol_g_ten should be at least 4");
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppv->l_max_g = ppr->l_max_g_ten;
class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */
class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */
class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */
class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */
ppv->l_max_pol_g = ppr->l_max_pol_g_ten;
class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */
class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */
class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */
class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */
}
}
/* ultra relativistic neutrinos */
class_define_index(ppv->index_pt_delta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur density */
class_define_index(ppv->index_pt_theta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur velocity */
class_define_index(ppv->index_pt_shear_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur shear */
ppv->l_max_ur = ppr->l_max_ur;
class_define_index(ppv->index_pt_l3_ur,ppt->evolve_tensor_ur,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */
if (ppt->evolve_tensor_ncdm == _TRUE_) {
ppv->index_pt_psi0_ncdm1 = index_pt;
ppv->N_ncdm = pba->N_ncdm;
class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
// Set value of ppv->l_max_ncdm:
class_test(ppr->l_max_ncdm < 4,
ppt->error_message,
"ppr->l_max_ncdm=%d should be at least 4, i.e. we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm);
//Copy value from precision parameter:
ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm;
ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm];
index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm];
}
}
/** - (b) metric perturbation h is a propagating degree of freedom, so h and hdot are included
in the vector of ordinary perturbations, not in that of metric perturbations */
class_define_index(ppv->index_pt_gw,_TRUE_,index_pt,1); /* tensor metric perturbation h (gravitational waves) */
class_define_index(ppv->index_pt_gwdot,_TRUE_,index_pt,1); /* its time-derivative */
}
ppv->pt_size = index_pt;
/** - allocate vectors for storing the values of all these
quantities and their time-derivatives at a given time */
class_calloc(ppv->y,ppv->pt_size,sizeof(double),ppt->error_message);
class_alloc(ppv->dy,ppv->pt_size*sizeof(double),ppt->error_message);
class_alloc(ppv->used_in_sources,ppv->pt_size*sizeof(int),ppt->error_message);
/** - specify which perturbations are needed in the evaluation of source terms */
/* take all of them by default */
for (index_pt=0; index_pt<ppv->pt_size; index_pt++)
ppv->used_in_sources[index_pt] = _TRUE_;
/* indicate which ones are not needed (this is just for saving time,
omitting perturbations in this list will not change the
results!) */
if (_scalars_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/* we don't need temperature multipoles above l=2 (but they are
defined only when rsa and tca are off) */
for (index_pt=ppv->index_pt_l3_g; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
/* for polarization, we only need l=0,2 (but l =1,3, ... are
defined only when rsa and tca are off) */
ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_;
for (index_pt=ppv->index_pt_pol3_g; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
/* we don't need ur multipoles above l=2 (but they are
defined only when rsa and ufa are off) */
for (index_pt=ppv->index_pt_l3_ur; index_pt <= ppv->index_pt_delta_ur+ppv->l_max_ur; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
}
if (pba->has_ncdm == _TRUE_) {
/* we don't need ncdm multipoles above l=2 (but they are
defined only when ncdmfa is off) */
index_pt = ppv->index_pt_psi0_ncdm1;
for(n_ncdm = 0; n_ncdm < ppv-> N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
if (l>2) ppv->used_in_sources[index_pt]=_FALSE_;
index_pt++;
}
}
}
}
}
if (_tensors_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/* we only need the temperature multipoles l=0,2,4 */
ppv->used_in_sources[ppv->index_pt_theta_g]=_FALSE_;
ppv->used_in_sources[ppv->index_pt_l3_g]=_FALSE_;
for (index_pt=ppv->index_pt_delta_g+5; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
/* same for polarization, we only need l=0,2,4 */
ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_;
ppv->used_in_sources[ppv->index_pt_pol3_g]=_FALSE_;
for (index_pt=ppv->index_pt_pol0_g+5; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
/* we need h' but not h */
ppv->used_in_sources[ppv->index_pt_gw]=_FALSE_;
}
/** - case of setting initial conditions for a new wavenumber */
if (pa_old == NULL) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: initializing vector at tau=%e\n",k,tau);
if (_scalars_) {
/** - --> (a) check that current approximation scheme is consistent
with initial conditions */
class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on,
ppt->error_message,
"scalar initial conditions assume radiation streaming approximation turned off");
if (pba->has_ur == _TRUE_) {
class_test(ppw->approx[ppw->index_ap_ufa] == (int)ufa_on,
ppt->error_message,
"scalar initial conditions assume ur fluid approximation turned off");
}
if (pba->has_ncdm == _TRUE_) {
class_test(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on,
ppt->error_message,
"scalar initial conditions assume ncdm fluid approximation turned off");
}
class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off,
ppt->error_message,
"scalar initial conditions assume tight-coupling approximation turned on");
}
if (_tensors_) {
class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off,
ppt->error_message,
"tensor initial conditions assume tight-coupling approximation turned on");
class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on,
ppt->error_message,
"tensor initial conditions assume radiation streaming approximation turned off");
}
/** - --> (b) let ppw-->pv points towards the perturb_vector structure
that we just created */
ppw->pv = ppv;
/** - --> (c) fill the vector ppw-->pv-->y with appropriate initial conditions */
class_call(perturb_initial_conditions(ppr,
pba,
ppt,
index_md,
index_ic,
k,
tau,
ppw),
ppt->error_message,
ppt->error_message);
}
/** - case of switching approximation while a wavenumber is being integrated */
else {
/** - --> (a) for the scalar mode: */
if (_scalars_) {
/** - ---> (a.1.) check that the change of approximation scheme makes
sense (note: before calling this routine there is already a
check that we wish to change only one approximation flag at
a time) */
class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on),
ppt->error_message,
"at tau=%g: the tight-coupling approximation can be switched off, not on",tau);
/** - ---> (a.2.) some variables (b, cdm, fld, ...) are not affected by
any approximation. They need to be reconducted whatever
the approximation switching is. We treat them here. Below
we will treat other variables case by case. */
ppv->y[ppv->index_pt_delta_b] =
ppw->pv->y[ppw->pv->index_pt_delta_b];
ppv->y[ppv->index_pt_theta_b] =
ppw->pv->y[ppw->pv->index_pt_theta_b];
if (pba->has_cdm == _TRUE_) {
ppv->y[ppv->index_pt_delta_cdm] =
ppw->pv->y[ppw->pv->index_pt_delta_cdm];
if (ppt->gauge == newtonian) {
ppv->y[ppv->index_pt_theta_cdm] =
ppw->pv->y[ppw->pv->index_pt_theta_cdm];
}
}
if (pba->has_dcdm == _TRUE_) {
ppv->y[ppv->index_pt_delta_dcdm] =
ppw->pv->y[ppw->pv->index_pt_delta_dcdm];
ppv->y[ppv->index_pt_theta_dcdm] =
ppw->pv->y[ppw->pv->index_pt_theta_dcdm];
}
if (pba->has_dr == _TRUE_){
for (l=0; l <= ppv->l_max_dr; l++)
ppv->y[ppv->index_pt_F0_dr+l] =
ppw->pv->y[ppw->pv->index_pt_F0_dr+l];
}
if (pba->has_fld == _TRUE_) {
if (pba->use_ppf == _FALSE_) {
ppv->y[ppv->index_pt_delta_fld] =
ppw->pv->y[ppw->pv->index_pt_delta_fld];
ppv->y[ppv->index_pt_theta_fld] =
ppw->pv->y[ppw->pv->index_pt_theta_fld];
}
else {
ppv->y[ppv->index_pt_Gamma_fld] =
ppw->pv->y[ppw->pv->index_pt_Gamma_fld];
}
}
if (pba->has_scf == _TRUE_) {
ppv->y[ppv->index_pt_phi_scf] =
ppw->pv->y[ppw->pv->index_pt_phi_scf];
ppv->y[ppv->index_pt_phi_prime_scf] =
ppw->pv->y[ppw->pv->index_pt_phi_prime_scf];
}
if (ppt->gauge == synchronous)
ppv->y[ppv->index_pt_eta] =
ppw->pv->y[ppw->pv->index_pt_eta];
if (ppt->gauge == newtonian)
ppv->y[ppv->index_pt_phi] =
ppw->pv->y[ppw->pv->index_pt_phi];
/* -- case of switching off tight coupling
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
/* tight-coupling approximation for shear_g (previously
computed in perturb_derivs: perturb_derivs is always
called at the end of generic_evolver, in order to update
all quantities in ppw to the time at which the
approximation is switched off) */
ppv->y[ppv->index_pt_shear_g] = ppw->tca_shear_g;
ppv->y[ppv->index_pt_l3_g] = 6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->s_l[3]*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for l=3 */
ppv->y[ppv->index_pt_pol0_g] = 2.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=0 */
ppv->y[ppv->index_pt_pol1_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*(5.-2.*ppw->s_l[2])/6.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=1 */
ppv->y[ppv->index_pt_pol2_g] = 0.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=2 */
ppv->y[ppv->index_pt_pol3_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*3.*ppw->s_l[3]/14.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=3 */
if (pba->has_ur == _TRUE_) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
}
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){
// This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly.
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
/* perturbed recombination */
/* the initial conditions are set when tca is switched off (current block) */
if (ppt->has_perturbed_recombination == _TRUE_){
ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] = 1./3.*ppv->y[ppw->pv->index_pt_delta_b];
ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] =0.;
}
} // end of block tca ON -> tca OFF
/* perturbed recombination */
/* For any other transition in the approximation scheme, we should just copy the value of the perturbations, provided tca is already off (otherwise the indices are not yet allocated). For instance, we do not want to copy the values in the (k,tau) region where both UFA and TCA are engaged.*/
if ((ppt->has_perturbed_recombination == _TRUE_)&&(pa_old[ppw->index_ap_tca]==(int)tca_off)){
ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] =
ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] =
ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
}
/* -- case of switching on radiation streaming
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
}
/* -- case of switching on ur fluid
approximation. Provide correct initial conditions to new set
of variables */
if (pba->has_ur == _TRUE_) {
if ((pa_old[ppw->index_ap_ufa] == (int)ufa_off) && (ppw->approx[ppw->index_ap_ufa] == (int)ufa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on ur fluid approximation at tau=%e\n",k,tau);
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
}
if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
ppv->y[ppv->index_pt_shear_g] =
ppw->pv->y[ppw->pv->index_pt_shear_g];
ppv->y[ppv->index_pt_l3_g] =
ppw->pv->y[ppw->pv->index_pt_l3_g];
for (l = 4; l <= ppw->pv->l_max_g; l++) {
ppv->y[ppv->index_pt_delta_g+l] =
ppw->pv->y[ppw->pv->index_pt_delta_g+l];
}
ppv->y[ppv->index_pt_pol0_g] =
ppw->pv->y[ppw->pv->index_pt_pol0_g];
ppv->y[ppv->index_pt_pol1_g] =
ppw->pv->y[ppw->pv->index_pt_pol1_g];
ppv->y[ppv->index_pt_pol2_g] =
ppw->pv->y[ppw->pv->index_pt_pol2_g];
ppv->y[ppv->index_pt_pol3_g] =
ppw->pv->y[ppw->pv->index_pt_pol3_g];
for (l = 4; l <= ppw->pv->l_max_pol_g; l++) {
ppv->y[ppv->index_pt_pol0_g+l] =
ppw->pv->y[ppw->pv->index_pt_pol0_g+l];
}
}
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
}
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
/* This is correct even when ncdmfa == off, since ppv->l_max_ncdm and
ppv->q_size_ncdm is updated.*/
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
}
}
/* -- case of switching on ncdm fluid
approximation. Provide correct initial conditions to new set
of variables */
if (pba->has_ncdm == _TRUE_) {
if ((pa_old[ppw->index_ap_ncdmfa] == (int)ncdmfa_off) && (ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on ncdm fluid approximation at tau=%e\n",k,tau);
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
}
if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
ppv->y[ppv->index_pt_shear_g] =
ppw->pv->y[ppw->pv->index_pt_shear_g];
ppv->y[ppv->index_pt_l3_g] =
ppw->pv->y[ppw->pv->index_pt_l3_g];
for (l = 4; l <= ppw->pv->l_max_g; l++) {
ppv->y[ppv->index_pt_delta_g+l] =
ppw->pv->y[ppw->pv->index_pt_delta_g+l];
}
ppv->y[ppv->index_pt_pol0_g] =
ppw->pv->y[ppw->pv->index_pt_pol0_g];
ppv->y[ppv->index_pt_pol1_g] =
ppw->pv->y[ppw->pv->index_pt_pol1_g];
ppv->y[ppv->index_pt_pol2_g] =
ppw->pv->y[ppw->pv->index_pt_pol2_g];
ppv->y[ppv->index_pt_pol3_g] =
ppw->pv->y[ppw->pv->index_pt_pol3_g];
for (l = 4; l <= ppw->pv->l_max_pol_g; l++) {
ppv->y[ppv->index_pt_pol0_g+l] =
ppw->pv->y[ppw->pv->index_pt_pol0_g+l];
}
}
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
}
}
a = ppw->pvecback[pba->index_bg_a];
index_pt = ppw->pv->index_pt_psi0_ncdm1;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
// We are in the fluid approximation, so ncdm_l_size is always 3.
ncdm_l_size = ppv->l_max_ncdm[n_ncdm]+1;
rho_plus_p_ncdm = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+
ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
for(l=0; l<=2; l++){
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+l] = 0.0;
}
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for(index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++){
// Integrate over distributions:
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] +=
pba->w_ncdm[n_ncdm][index_q]*q2*epsilon*
ppw->pv->y[index_pt];
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] +=
pba->w_ncdm[n_ncdm][index_q]*q2*q*
ppw->pv->y[index_pt+1];
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] +=
pba->w_ncdm[n_ncdm][index_q]*q2*q2/epsilon*
ppw->pv->y[index_pt+2];
//Jump to next momentum bin in ppw->pv->y:
index_pt += (ppw->pv->l_max_ncdm[n_ncdm]+1);
}
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] *=factor/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] *=k*factor/rho_plus_p_ncdm;
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] *=2.0/3.0*factor/rho_plus_p_ncdm;
}
}
}
}
/** - --> (b) for the vector mode */
if (_vectors_) {
/** - ---> (b.1.) check that the change of approximation scheme makes
sense (note: before calling this routine there is already a
check that we wish to change only one approximation flag at
a time) */
class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on),
ppt->error_message,
"at tau=%g: the tight-coupling approximation can be switched off, not on",tau);
/** - ---> (b.2.) some variables (gw, gwdot, ...) are not affected by
any approximation. They need to be reconducted whatever
the approximation switching is. We treat them here. Below
we will treat other variables case by case. */
if (ppt->gauge == synchronous){
ppv->y[ppv->index_pt_hv_prime] =
ppw->pv->y[ppw->pv->index_pt_hv_prime];
}
if (ppt->gauge == newtonian){
ppv->y[ppv->index_pt_V] =
ppw->pv->y[ppw->pv->index_pt_V];
}
ppv->y[ppv->index_pt_theta_b] =
ppw->pv->y[ppw->pv->index_pt_theta_b];
/* -- case of switching off tight coupling
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);
ppv->y[ppv->index_pt_delta_g] = 0.0; //TBC
//-4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
ppv->y[ppv->index_pt_pol0_g] = 0.0; //TBC
//1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
}
/* -- case of switching on radiation streaming
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);
}
}
/** - --> (c) for the tensor mode */
if (_tensors_) {
/** - ---> (c.1.) check that the change of approximation scheme makes
sense (note: before calling this routine there is already a
check that we wish to change only one approximation flag at
a time) */
class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on),
ppt->error_message,
"at tau=%g: the tight-coupling approximation can be switched off, not on",tau);
/** - ---> (c.2.) some variables (gw, gwdot, ...) are not affected by
any approximation. They need to be reconducted whatever
the approximation switching is. We treat them here. Below
we will treat other variables case by case. */
ppv->y[ppv->index_pt_gw] =
ppw->pv->y[ppw->pv->index_pt_gw];
ppv->y[ppv->index_pt_gwdot] =
ppw->pv->y[ppw->pv->index_pt_gwdot];
if (ppt->evolve_tensor_ur == _TRUE_){
/* For now, neutrinos go here. */
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
if (ppt->evolve_tensor_ncdm == _TRUE_){
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){
// This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly.
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
/* -- case of switching off tight coupling
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);
ppv->y[ppv->index_pt_delta_g] = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
ppv->y[ppv->index_pt_pol0_g] = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
}
/* -- case of switching on radiation streaming
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);
}
}
/** - --> (d) free the previous vector of perturbations */
class_call(perturb_vector_free(ppw->pv),
ppt->error_message,
ppt->error_message);
/** - --> (e) let ppw-->pv points towards the perturb_vector structure
that we just created */
ppw->pv = ppv;
}
return _SUCCESS_;
}
/**
* Free the perturb_vector structure.
*
* @param pv Input: pointer to perturb_vector structure to be freed
* @return the error status
*/
int perturb_vector_free(
                        struct perturb_vector * pv
                        ) {

  /* The ncdm bookkeeping arrays are allocated only when ncdm species
     are present and are NULL otherwise; since free(NULL) is a
     well-defined no-op in standard C, no NULL guard is needed. */
  free(pv->l_max_ncdm);
  free(pv->q_size_ncdm);

  /* y, dy and used_in_sources are always allocated in
     perturb_vector_init, so they can be freed unconditionally. */
  free(pv->y);
  free(pv->dy);
  free(pv->used_in_sources);

  /* finally release the structure itself */
  free(pv);

  return _SUCCESS_;

}
/**
* For each mode, wavenumber and initial condition, this function
* initializes in the vector all values of perturbed variables (in a
* given gauge). It is assumed here that all values have previously been
* set to zero, only non-zero values are set here.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param index_ic Input: index of initial condition under consideration (ad, iso...)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param ppw Input/Output: workspace containing in input the approximation scheme, the background/thermodynamics/metric quantities, and eventually the previous vector y; and in output the new vector y.
* @return the error status
*/
int perturb_initial_conditions(struct precision * ppr,
struct background * pba,
struct perturbs * ppt,
int index_md,
int index_ic,
double k,
double tau,
struct perturb_workspace * ppw
) {
/** Summary: */
/** --> Declare local variables */
double a,a_prime_over_a;
double w_fld,dw_over_da_fld,integral_fld;
double delta_ur=0.,theta_ur=0.,shear_ur=0.,l3_ur=0.,eta=0.,delta_cdm=0.,alpha, alpha_prime;
double delta_dr=0;
double q,epsilon,k2;
int index_q,n_ncdm,idx;
double rho_r,rho_m,rho_nu,rho_m_over_rho_r;
double fracnu,fracg,fracb,fraccdm,om;
double ktau_two,ktau_three;
double f_dr;
double delta_tot;
double velocity_tot;
double s2_squared;
/** --> For scalars */
if (_scalars_) {
/** - (a) compute relevant background quantities: compute rho_r,
rho_m, rho_nu (= all relativistic except photons), and their
ratio. */
class_call(background_at_tau(pba,
tau,
pba->normal_info,
pba->inter_normal,
&(ppw->last_index_back),
ppw->pvecback),
pba->error_message,
ppt->error_message);
a = ppw->pvecback[pba->index_bg_a];
a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a;
/* 8piG/3 rho_r(t_i) */
rho_r = ppw->pvecback[pba->index_bg_rho_g];
/* 8piG/3 rho_m(t_i) */
rho_m = ppw->pvecback[pba->index_bg_rho_b];
/* 8piG/3 rho_nu(t_i) (all neutrinos and collisionless relics being relativistic at that time) */
rho_nu = 0.;
if (pba->has_cdm == _TRUE_) {
rho_m += ppw->pvecback[pba->index_bg_rho_cdm];
}
if (pba->has_dcdm == _TRUE_) {
rho_m += ppw->pvecback[pba->index_bg_rho_dcdm];
}
if (pba->has_dr == _TRUE_) {
rho_r += ppw->pvecback[pba->index_bg_rho_dr];
rho_nu += ppw->pvecback[pba->index_bg_rho_dr];
}
if (pba->has_ur == _TRUE_) {
rho_r += ppw->pvecback[pba->index_bg_rho_ur];
rho_nu += ppw->pvecback[pba->index_bg_rho_ur];
}
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm<pba->N_ncdm; n_ncdm++){
rho_r += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm];
rho_nu += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm];
}
}
class_test(rho_r == 0.,
ppt->error_message,
"stop to avoid division by zero");
/* f_nu = Omega_nu(t_i) / Omega_r(t_i) */
fracnu = rho_nu/rho_r;
/* f_g = Omega_g(t_i) / Omega_r(t_i) */
fracg = ppw->pvecback[pba->index_bg_rho_g]/rho_r;
/* f_b = Omega_b(t_i) / Omega_m(t_i) */
fracb = ppw->pvecback[pba->index_bg_rho_b]/rho_m;
/* f_cdm = Omega_cdm(t_i) / Omega_m(t_i) */
fraccdm = 1.-fracb;
/* Omega_m(t_i) / Omega_r(t_i) */
rho_m_over_rho_r = rho_m/rho_r;
/* omega = Omega_m(t_i) a(t_i) H(t_i) / sqrt(Omega_r(t_i))
= Omega_m(t_0) a(t_0) H(t_0) / sqrt(Omega_r(t_0)) assuming rho_m in a-3 and rho_r in a^-4
= (8piG/3 rho_m(t_i)) a(t_i) / sqrt(8piG/3 rho_r(t_i)) in Mpc-1
This (a priori strange) parameter is the relevant one for expressing a
as a function of tau during radiation and matter domination (but not DE domination).
Indeed the exact solution of Friedmann when there is only radiation and matter in
the universe is
a = [H(t_0)^2 Omega_m(t_0) a(t_0)^3 / 4] x [tau^2 + 4 tau / omega]
*/
om = a*rho_m/sqrt(rho_r);
/* (k tau)^2, (k tau)^3 */
ktau_two=k*k*tau*tau;
ktau_three=k*tau*ktau_two;
/* curvature-dependent factors */
s2_squared = 1.-3.*pba->K/k/k;
/** - (b) starts by setting everything in synchronous gauge. If
another gauge is needed, we will perform a gauge
transformation below. */
/** - --> (b.1.) adiabatic */
if ((ppt->has_ad == _TRUE_) && (index_ic == ppt->index_ic_ad)) {
/* The following formulas are valid at leading order in
(k*tau) and (om*tau), and order zero in
tight-coupling. Identical to first order terms in CRS,
except for normalization (when ppr->curvature_ini=1, tau=1:
leads to factor 1/2 difference between CRS formulas with
beta1=0). Identical to CAMB when om set to zero in theta_g,
theta_ur, shear_ur, tau
In the non-flat case the relation R=eta is still valid
outside the horizon for adiabatic IC. Hence eta is still
set to ppr->curvature_ini at leading order. Factors s2
appear through the solution of Einstein equations and
equations of motion. */
/* photon density */
ppw->pv->y[ppw->pv->index_pt_delta_g] = - ktau_two/3. * (1.-om*tau/5.)
* ppr->curvature_ini * s2_squared;
/* photon velocity */
ppw->pv->y[ppw->pv->index_pt_theta_g] = - k*ktau_three/36. * (1.-3.*(1.+5.*fracb-fracnu)/20./(1.-fracnu)*om*tau)
* ppr->curvature_ini * s2_squared;
/* tighly-coupled baryons */
ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* baryon density */
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; /* baryon velocity */
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* cdm density */
/* cdm velocity vanishes in the synchronous gauge */
}
if (pba->has_dcdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_dcdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* dcdm density */
/* dcdm velocity vanishes initially in the synchronous gauge */
}
/* fluid (assumes wa=0, if this is not the case the
fluid will catch anyway the attractor solution) */
if (pba->has_fld == _TRUE_) {
class_call(background_w_fld(pba,a,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
if (pba->use_ppf == _FALSE_) {
ppw->pv->y[ppw->pv->index_pt_delta_fld] = - ktau_two/4.*(1.+w_fld)*(4.-3.*pba->cs2_fld)/(4.-6.*w_fld+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC: curvature
ppw->pv->y[ppw->pv->index_pt_theta_fld] = - k*ktau_three/4.*pba->cs2_fld/(4.-6.*w_fld+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC:curvature
}
/* if use_ppf == _TRUE_, y[ppw->pv->index_pt_Gamma_fld] will be automatically set to zero, and this is what we want (although one could probably work out some small nonzero initial conditions: TODO) */
}
if (pba->has_scf == _TRUE_) {
/** - ---> Canonical field (solving for the perturbations):
* initial perturbations set to zero, they should reach the attractor soon enough.
* - ---> TODO: Incorporate the attractor IC from 1004.5509.
* delta_phi \f$ = -(a/k)^2/\phi'(\rho + p)\theta \f$,
* delta_phi_prime \f$ = a^2/\phi' \f$ (delta_rho_phi + V'delta_phi),
* and assume theta, delta_rho as for perfect fluid
* with \f$ c_s^2 = 1 \f$ and w = 1/3 (ASSUMES radiation TRACKING)
*/
ppw->pv->y[ppw->pv->index_pt_phi_scf] = 0.;
/* a*a/k/k/ppw->pvecback[pba->index_bg_phi_prime_scf]*k*ktau_three/4.*1./(4.-6.*(1./3.)+3.*1.) * (ppw->pvecback[pba->index_bg_rho_scf] + ppw->pvecback[pba->index_bg_p_scf])* ppr->curvature_ini * s2_squared; */
ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] = 0.;
/* delta_fld expression * rho_scf with the w = 1/3, c_s = 1
a*a/ppw->pvecback[pba->index_bg_phi_prime_scf]*( - ktau_two/4.*(1.+1./3.)*(4.-3.*1.)/(4.-6.*(1/3.)+3.*1.)*ppw->pvecback[pba->index_bg_rho_scf] - ppw->pvecback[pba->index_bg_dV_scf]*ppw->pv->y[ppw->pv->index_pt_phi_scf])* ppr->curvature_ini * s2_squared; */
}
/* all relativistic relics: ur, early ncdm, dr */
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_)) {
delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; /* density of ultra-relativistic neutrinos/relics */
theta_ur = - k*ktau_three/36./(4.*fracnu+15.) * (4.*fracnu+11.+12.*s2_squared-3.*(8.*fracnu*fracnu+50.*fracnu+275.)/20./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini * s2_squared; /* velocity of ultra-relativistic neutrinos/relics */ //TBC
shear_ur = ktau_two/(45.+12.*fracnu) * (3.*s2_squared-1.) * (1.+(4.*fracnu-5.)/4./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini;//TBC /s2_squared; /* shear of ultra-relativistic neutrinos/relics */ //TBC:0
l3_ur = ktau_three*2./7./(12.*fracnu+45.)* ppr->curvature_ini;//TBC
if (pba->has_dr == _TRUE_) delta_dr = delta_ur;
}
/* synchronous metric perturbation eta */
//eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)) / s2_squared;
//eta = ppr->curvature_ini * s2_squared * (1.-ktau_two/12./(15.+4.*fracnu)*(15.*s2_squared-10.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om));
eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om));
}
/* isocurvature initial conditions taken from Bucher, Moodley,
Turok 99, with just a different normalization convention for
tau and the scale factor. [k tau] from BMT99 is left invariant
because it is the ratio [k/aH]. But [Omega_i,0 tau] from BMT99
must be replaced by [frac_i*om*tau/4]. Some doubts remain about
the niv formulas, which should be rechecked at some point. We
also checked that for bi,cdi,nid, everything coincides exactly
with the CAMB formulas. */
/** - --> (b.2.) Cold dark matter Isocurvature */
if ((ppt->has_cdi == _TRUE_) && (index_ic == ppt->index_ic_cdi)) {
class_test(pba->has_cdm == _FALSE_,
ppt->error_message,
"not consistent to ask for CDI in absence of CDM!");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fraccdm*om*tau*(-2./3.+om*tau/4.);
ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fraccdm*om*ktau_two/12.;
ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) {
delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g];
theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g];
shear_ur = -ppr->entropy_ini*fraccdm*ktau_two*tau*om/6./(2.*fracnu+15.);
}
eta = -ppr->entropy_ini*fraccdm*om*tau*(1./6.-om*tau/16.);
}
/** - --> (b.3.) Baryon Isocurvature */
if ((ppt->has_bi == _TRUE_) && (index_ic == ppt->index_ic_bi)) {
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracb*om*tau*(-2./3.+om*tau/4.);
ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracb*om*ktau_two/12.;
ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
}
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) {
delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g];
theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g];
shear_ur = -ppr->entropy_ini*fracb*ktau_two*tau*om/6./(2.*fracnu+15.);
}
eta = -ppr->entropy_ini*fracb*om*tau*(1./6.-om*tau/16.);
}
/** - --> (b.4.) Neutrino density Isocurvature */
if ((ppt->has_nid == _TRUE_) && (index_ic == ppt->index_ic_nid)) {
class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_),
ppt->error_message,
"not consistent to ask for NID in absence of ur or ncdm species!");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracnu/fracg*(-1.+ktau_two/6.);
ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracnu/fracg*k*k*tau*(1./4.-fracb/fracg*3./16.*om*tau);
ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini*fracnu/fracg/8.*ktau_two;
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*fracnu*fracb/fracg/80.*ktau_two*om*tau;
}
delta_ur = ppr->entropy_ini*(1.-ktau_two/6.);
theta_ur = ppr->entropy_ini*k*k*tau/4.;
shear_ur = ppr->entropy_ini*ktau_two/(4.*fracnu+15.)/2.;
eta = -ppr->entropy_ini*fracnu/(4.*fracnu+15.)/6.*ktau_two;
}
/** - --> (b.5.) Neutrino velocity Isocurvature */
if ((ppt->has_niv == _TRUE_) && (index_ic == ppt->index_ic_niv)) {
class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_),
ppt->error_message,
"not consistent to ask for NIV in absence of ur or ncdm species!");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*k*tau*fracnu/fracg*
(1. - 3./16.*fracb*(2.+fracg)/fracg*om*tau); /* small diff wrt camb */
ppw->pv->y[ppw->pv->index_pt_theta_g] = ppr->entropy_ini*fracnu/fracg*3./4.*k*
(-1.+3./4.*fracb/fracg*om*tau+3./16.*om*om*tau*tau*fracb/fracg/fracg*(fracg-3.*fracb)+ktau_two/6.);
ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* small diff wrt camb */
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*9./64.*fracnu*fracb/fracg*k*tau*om*tau;
}
delta_ur = -ppr->entropy_ini*k*tau*(1.+3./16.*fracb*fracnu/fracg*om*tau); /* small diff wrt camb */
theta_ur = ppr->entropy_ini*3./4.*k*(1. - 1./6.*ktau_two*(4.*fracnu+9.)/(4.*fracnu+5.));
shear_ur = ppr->entropy_ini/(4.*fracnu+15.)*k*tau*(1. + 3.*om*tau*fracnu/(4.*fracnu+15.)); /* small diff wrt camb */
eta = ppr->entropy_ini*fracnu*k*tau*(-1./(4.*fracnu+5.) + (-3./64.*fracb/fracg+15./4./(4.*fracnu+15.)/(4.*fracnu+5.)*om*tau)); /* small diff wrt camb */
}
/** - (c) If the needed gauge is really the synchronous gauge, we need to affect the previously computed value of eta to the actual variable eta */
if (ppt->gauge == synchronous) {
ppw->pv->y[ppw->pv->index_pt_eta] = eta;
}
/** - (d) If the needed gauge is the newtonian gauge, we must compute alpha and then perform a gauge transformation for each variable */
if (ppt->gauge == newtonian) {
/* alpha is like in Ma & Bertschinger: (h'+6 eta')/(2k^2). We obtain it from the first two Einstein equations:
alpha = [eta + 3/2 (a'/a)^2 (delta_rho/rho_c) / k^2 /s_2^2 + 3/2 (a'/a)^3 3 ((rho+p)theta/rho_c) / k^4 / s_2^2] / (a'/a)
= [eta + 3/2 (a'/a)^2 / k^2 /s_2^2 {delta_tot + 3 (a'/a) /k^2 velocity_tot}] / (a'/a)
with
delta_tot = (delta_rho/rho_c)
= [rho_r delta_r + rho_m delta_m] / (rho_r + rho_m)
= [delta_r + (rho_m/rho_r) delta_m] / (1 + rho_m/rho_r)
= [(f_g delta_g + f_nu delta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm delta_cdm)] / (1 + rho_m/rho_r)
velocity_tot = ((rho+p)theta/rho_c)
= [(4/3) rho_r theta_r + rho_m theta_m] / (rho_r + rho_m)
= [(4/3) theta_r + (rho_m/rho_r) theta_m] / (1 + rho_m/rho_r)
= [(4/3) (f_g theta_g + f_nu theta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm 0)] / (1 + rho_m/rho_r)
*/
if (pba->has_cdm == _TRUE_)
delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_cdm];
else if (pba->has_dcdm == _TRUE_)
delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_dcdm];
else
delta_cdm=0.;
// note: if there are no neutrinos, fracnu, delta_ur and theta_ur below will consistently be zero.
delta_tot = (fracg*ppw->pv->y[ppw->pv->index_pt_delta_g]+fracnu*delta_ur+rho_m_over_rho_r*(fracb*ppw->pv->y[ppw->pv->index_pt_delta_b]+fraccdm*delta_cdm))/(1.+rho_m_over_rho_r);
velocity_tot = ((4./3.)*(fracg*ppw->pv->y[ppw->pv->index_pt_theta_g]+fracnu*theta_ur) + rho_m_over_rho_r*fracb*ppw->pv->y[ppw->pv->index_pt_theta_b])/(1.+rho_m_over_rho_r);
alpha = (eta + 3./2.*a_prime_over_a*a_prime_over_a/k/k/s2_squared*(delta_tot + 3.*a_prime_over_a/k/k*velocity_tot))/a_prime_over_a;
ppw->pv->y[ppw->pv->index_pt_phi] = eta - a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_delta_g] -= 4.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_g] += k*k*alpha;
ppw->pv->y[ppw->pv->index_pt_delta_b] -= 3.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_b] += k*k*alpha;
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] -= 3.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_cdm] = k*k*alpha;
}
if (pba->has_dcdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_dcdm] += (-3.*a_prime_over_a - a*pba->Gamma_dcdm)*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_dcdm] = k*k*alpha;
}
/* fluid */
if ((pba->has_fld == _TRUE_) && (pba->use_ppf == _FALSE_)) {
class_call(background_w_fld(pba,a,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
ppw->pv->y[ppw->pv->index_pt_delta_fld] += 3*(1.+w_fld)*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_fld] += k*k*alpha;
}
/* scalar field: check */
if (pba->has_scf == _TRUE_) {
alpha_prime = 0.0;
/* - 2. * a_prime_over_a * alpha + eta
- 4.5 * (a2/k2) * ppw->rho_plus_p_shear; */
ppw->pv->y[ppw->pv->index_pt_phi_scf] += alpha*ppw->pvecback[pba->index_bg_phi_prime_scf];
ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] +=
(-2.*a_prime_over_a*alpha*ppw->pvecback[pba->index_bg_phi_prime_scf]
-a*a* dV_scf(pba,ppw->pvecback[pba->index_bg_phi_scf])*alpha
+ppw->pvecback[pba->index_bg_phi_prime_scf]*alpha_prime);
}
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_)) {
delta_ur -= 4.*a_prime_over_a*alpha;
theta_ur += k*k*alpha;
/* shear and l3 are gauge invariant */
if (pba->has_dr == _TRUE_)
delta_dr += (-4.*a_prime_over_a + a*pba->Gamma_dcdm*ppw->pvecback[pba->index_bg_rho_dcdm]/ppw->pvecback[pba->index_bg_rho_dr])*alpha;
}
} /* end of gauge transformation to newtonian gauge */
/** - (e) In any gauge, we should now implement the relativistic initial conditions in ur and ncdm variables */
if (pba->has_ur == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_ur] = delta_ur;
ppw->pv->y[ppw->pv->index_pt_theta_ur] = theta_ur;
ppw->pv->y[ppw->pv->index_pt_shear_ur] = shear_ur;
ppw->pv->y[ppw->pv->index_pt_l3_ur] = l3_ur;
}
if (pba->has_ncdm == _TRUE_) {
idx = ppw->pv->index_pt_psi0_ncdm1;
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++) {
q = pba->q_ncdm[n_ncdm][index_q];
epsilon = sqrt(q*q+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
ppw->pv->y[idx] = -0.25 * delta_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
ppw->pv->y[idx+1] = -epsilon/3./q/k*theta_ur* pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
ppw->pv->y[idx+2] = -0.5 * shear_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
ppw->pv->y[idx+3] = -0.25 * l3_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
//Jump to next momentum bin:
idx += (ppw->pv->l_max_ncdm[n_ncdm]+1);
}
}
}
if (pba->has_dr == _TRUE_) {
f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*ppw->pvecback[pba->index_bg_rho_dr];
ppw->pv->y[ppw->pv->index_pt_F0_dr] = delta_dr*f_dr;
ppw->pv->y[ppw->pv->index_pt_F0_dr+1] = 4./(3.*k)*theta_ur*f_dr;
ppw->pv->y[ppw->pv->index_pt_F0_dr+2] = 2.*shear_ur*f_dr;
ppw->pv->y[ppw->pv->index_pt_F0_dr+3] = l3_ur*f_dr;
}
}
/** --> For tensors */
if (_tensors_) {
/** tensor initial conditions take into account the fact that
scalar (resp. tensor) \f$ C_l\f$'s are related to the real space
power spectrum of curvature (resp. of the tensor part of
metric perturbations)
\f[ <R(x) R(x)> \ \ \sum_{ij} <h_{ij}(x) h^{ij}(x)> \f]
In momentum space it is conventional to use the modes R(k)
and h(k) where the quantity h obeying to the equation of
propagation:
\f[ h'' + \frac{2a'}{a} h + [k2+2K] h = 12\pi Ga2 (\rho+p) \sigma = 8\pi Ga2 p \pi \f]
and the power spectra in real space and momentum space are related through:
\f[ <R(x) R(x)> = \int \frac{dk}{k} \left[ \frac{k^3}{2\pi^2} <R(k)R(k)^*>\right] = \int \frac{dk}{k} \mathcal{P}_R(k) \f]
\f[\sum_{ij} <h_{ij}(x) h^{ij}(x)> = \frac{dk}{k} \left[ \frac{k^3}{2\pi^2} F\left(\frac{k^2}{K}\right) <h(k)h(k)^*>\right] = \int \frac{dk}{k} F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f]
where \f$ \mathcal{P}_R\f$ and \f$ \mathcal{P}_h\f$ are the dimensionless spectrum of
curvature R, and F is a function of k2/K, where K is the curvature
parameter. F is equal to one in flat space (K=0), and coming
from the contraction of the laplacian eigentensor \f$ Q_{ij}\f$ with
itself. We will give F explicitly below.
Similarly the scalar (S) and tensor (T) \f$ C_l\f$'s are given by
\f[ C_l^S = 4\pi \int \frac{dk}{k} [\Delta_l^S(q)]^2 \mathcal{P}_R(k) \f]
\f[ C_l^T = 4\pi \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f]
The usual convention for the tensor-to-scalar ratio
\f$ r = A_t / A_s \f$ at pivot scale
= 16 epsilon in single-field inflation
is such that for constant \f$ \mathcal{P}_R(k)\f$ and \f$ \mathcal{P}_h(k)\f$,
\f[ r = 6 \frac{\mathcal{P}_h(k)}{\mathcal{P}_R(k)} \f]
so
\f[ \mathcal{P}_h(k) = \frac{\mathcal{P}_R(k) r}{6} = \frac{A_s r}{6} = \frac{A_t}{6} \f]
A priori it would make sense to say that for a power-law
primordial spectrum there is an extra factor \f$ (k/k_{pivot})^{n_t} \f$
(and eventually running and so on and so forth...)
However it has been shown that the minimal models of
inflation in a negatively curved bubble lead to
\f$ \mathcal{P}_h(k)=\tanh(\pi*\nu/2)\f$. In open models it is customary to
define the tensor tilt in a non-flat universe as a deviation
from this behavior rather than from true scale-invariance in
the above sense.
Hence we should have
\f[ \mathcal{P}_h(k) = \frac{A_t}{6} [ \tanh(\pi*\frac{\nu}{2})] (k/k_{pivot})^{(n_t+...)}\f]
where the brackets \f[ [...] \f] mean "if K<0"
Then
\f[ C_l^T = 4\pi \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \frac{A_t}{6} [\tanh(\pi*\frac{\nu}{2})] (k/k_{pivot})^{(n_t+...)} \f]
In the code, it is then a matter of choice to write:
- In the primordial module: \f$ \mathcal{P}_h(k) = \frac{A_t}{6} \tanh{(\pi*\frac{\nu}{2})} (k/k^*)^{n_T}\f$
- In the perturbation initial conditions: \f$ h = 1\f$
- In the spectra module: \f$ C_l^T = \frac{4}{\pi} \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f$
or:
- In the primordial module: \f$ \mathcal{P}_h(k) = A_t (k/k^*)^{n_T} \f$
- In the perturbation initial conditions: \f$ h = \sqrt{[F\left(\frac{k^2}{K}\right) / 6] \tanh{(\pi*\frac{\nu}{2})}} \f$
- In the spectra module: \f$ C_l^T = \frac{4}{\pi} \int \frac{dk}{k} [\Delta_l^T(q)]^2 \mathcal{P}_h(k) \f$
We choose this last option, such that the primordial and
spectra module differ minimally in flat and non-flat space. Then we must impose
\f[ h = \sqrt{\left(\frac{F}{6}\right) \tanh{(\pi*\frac{\nu}{2})}} \f]
The factor F is found to be given by:
\f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dk}{k} \frac{k2(k2-K)}{(k2+3K)(k2+2K)} \mathcal{P}_h(k) \f]
Introducing as usual \f$ q2 = k2 - 3K \f$ and using qdq = kdk this gives
\f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dk}{k} \frac{(q2-3K)(q2-4K)}{q2(q2-K)} \mathcal{P}_h(k) \f]
Using qdq = kdk this is equivalent to
\f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dq}{q} \frac{q2-4K}{q2-K} \mathcal{P}_h(k(q)) \f]
Finally, introducing \f$ \nu=q/\sqrt{|K|}\f$ and sgnK=SIGN(k)\f$=\pm 1\f$, this could also be written
\f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{d\nu}{\nu} \frac{(\nu2-4sgnK)}{(\nu2-sgnK)} \mathcal{P}_h(k(\nu)) \f]
Equation (43,44) of Hu, Seljak, White, Zaldarriaga is
equivalent to absorbing the above factor
\f$ (\nu2-4sgnK)/(\nu2-sgnK)\f$ in the definition of the primordial
spectrum. Since the initial condition should be written in terms of k rather than nu, they should read
\f[ h = \sqrt{ [k2(k2-K)]/[(k2+3K)(k2+2K)] / 6 * \tanh{(\pi*\frac{\nu}{2})} } \f]
We leave the freedom to multiply by an arbitrary number
ppr->gw_ini. The standard convention corresponding to
standard definitions of r, \f$ A_T\f$, \f$ n_T\f$ is however ppr->gw_ini=1.
*
*/
if (index_ic == ppt->index_ic_ten) {
ppw->pv->y[ppw->pv->index_pt_gw] = ppr->gw_ini/_SQRT6_;
}
k2 = k*k;
if (pba->sgnK != 0) {
ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(k2*(k2-pba->K)/(k2+3.*pba->K)/(k2+2.*pba->K));
}
if (pba->sgnK == -1) {
if (k*k+3*pba->K >= 0.) {
ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(tanh(_PI_/2.*sqrt(k2+3*pba->K)/sqrt(-pba->K)));
}
else {
ppw->pv->y[ppw->pv->index_pt_gw] = 0.;
}
}
}
return _SUCCESS_;
}
/**
* Evaluate background/thermodynamics at \f$ \tau \f$, infer useful flags / time scales for integrating perturbations.
*
* Evaluate background quantities at \f$ \tau \f$, as well as thermodynamics for scalar mode; infer useful flags and time scales for integrating the perturbations:
* - check whether tight-coupling approximation is needed.
* - check whether radiation (photons, massless neutrinos...) perturbations are needed.
* - choose step of integration: step = ppr->perturb_integration_stepsize * min_time_scale, where min_time_scale = smallest time scale involved in the equations. There are three time scales to compare:
* -# that of recombination, \f$ \tau_c = 1/\kappa' \f$
* -# Hubble time scale, \f$ \tau_h = a/a' \f$
* -# Fourier mode, \f$ \tau_k = 1/k \f$
*
* So, in general, min_time_scale = \f$ \min(\tau_c, \tau_b, \tau_h, \tau_k) \f$.
*
* However, if \f$ \tau_c \ll \tau_h \f$ and \f$ \tau_c
* \ll \tau_k \f$, we can use the tight-coupling regime for photons
* and write equations in such way that the time scale \f$
* \tau_c \f$ becomes irrelevant (no effective mass term in \f$
* 1/\tau_c \f$). Then, the smallest
* scale in the equations is only \f$ \min(\tau_h, \tau_k) \f$.
* In practice, it is sufficient to use only the condition \f$ \tau_c \ll \tau_h \f$.
*
* Also, if \f$ \rho_{matter} \gg \rho_{radiation} \f$ and \f$ k \gg
* aH \f$, we can switch off radiation perturbations (i.e. switch on
* the free-streaming approximation) and then the smallest scale is
* simply \f$ \tau_h \f$.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param ppw Input/Output: in output contains the approximation to be used at this time
* @return the error status
*/
int perturb_approximations(
                           struct precision * ppr,
                           struct background * pba,
                           struct thermo * pth,
                           struct perturbs * ppt,
                           int index_md,
                           double k,
                           double tau,
                           struct perturb_workspace * ppw
                           ) {

  /** Summary: */

  /** - define local variables */

  /* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */
  double tau_k;
  /* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */
  double tau_h;
  /* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */
  double tau_c;
  /* scratch boolean: result of the trigger comparison for each approximation */
  int trigger_on;

  /** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */

  class_test(k == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  tau_k = 1./k;

  /** - evaluate background quantities with background_at_tau() and
      Hubble time scale \f$ \tau_h = a/a' \f$ */

  class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), ppw->pvecback),
             pba->error_message,
             ppt->error_message);

  class_test(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a] == 0.,
             ppt->error_message,
             "aH=0, stop to avoid division by zero");

  tau_h = 1./(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a]);

  /** - for scalar modes: */

  if (_scalars_) {

    /** - --> (a) evaluate thermodynamical quantities with thermodynamics_at_z() */

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./ppw->pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   ppw->inter_mode,
                                   &(ppw->last_index_thermo),
                                   ppw->pvecback,
                                   ppw->pvecthermo),
               pth->error_message,
               ppt->error_message);

    /** - ---> (b.1.) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */

    if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) {
      ppw->approx[ppw->index_ap_tca] = (int)tca_off;
    }

    /** - ---> (b.2.) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */

    else {

      /** - ----> (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

      tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa];

      class_test(tau_c < 0.,
                 ppt->error_message,
                 "tau_c = 1/kappa' should always be positive unless there is something wrong in the thermodynamics module. However you have here tau_c=%e at z=%e, conformal time=%e x_e=%e. (This could come from the interpolation of a too poorly sampled reionisation history?).\n",
                 tau_c,
                 1./ppw->pvecback[pba->index_bg_a]-1.,
                 tau,
                 ppw->pvecthermo[pth->index_th_xe]);

      /** - ----> (b.2.b) check whether tight-coupling approximation should be on:
          tau_c must be small compared to both the Hubble and the mode time scale */

      trigger_on = (tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) &&
                   (tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k);

      ppw->approx[ppw->index_ap_tca] = trigger_on ? (int)tca_on : (int)tca_off;
    }

    /** - --> (c) free-streaming approximations: radiation streaming is switched
        on well inside the horizon, after photon decoupling, and only if the
        user did not disable it */

    trigger_on = (tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) &&
                 (tau > pth->tau_free_streaming) &&
                 (ppr->radiation_streaming_approximation != rsa_none);

    ppw->approx[ppw->index_ap_rsa] = trigger_on ? (int)rsa_on : (int)rsa_off;

    /* ultra-relativistic species may be described as a fluid inside the horizon */
    if (pba->has_ur == _TRUE_) {

      trigger_on = (tau/tau_k > ppr->ur_fluid_trigger_tau_over_tau_k) &&
                   (ppr->ur_fluid_approximation != ufa_none);

      ppw->approx[ppw->index_ap_ufa] = trigger_on ? (int)ufa_on : (int)ufa_off;
    }

    /* non-cold dark matter may be described as a fluid inside the horizon */
    if (pba->has_ncdm == _TRUE_) {

      trigger_on = (tau/tau_k > ppr->ncdm_fluid_trigger_tau_over_tau_k) &&
                   (ppr->ncdm_fluid_approximation != ncdmfa_none);

      ppw->approx[ppw->index_ap_ncdmfa] = trigger_on ? (int)ncdmfa_on : (int)ncdmfa_off;
    }
  }

  /** - for tensor modes: */

  if (_tensors_) {

    /** - --> (a) evaluate thermodynamical quantities with thermodynamics_at_z() */

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./ppw->pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   ppw->inter_mode,
                                   &(ppw->last_index_thermo),
                                   ppw->pvecback,
                                   ppw->pvecthermo),
               pth->error_message,
               ppt->error_message);

    /** - ---> (b.1.) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */

    if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) {
      ppw->approx[ppw->index_ap_tca] = (int)tca_off;
    }

    /** - ---> (b.2.) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */

    else {

      /** - ----> (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

      tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa];

      /** - ----> (b.2.b) check whether tight-coupling approximation should be on */

      trigger_on = (tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) &&
                   (tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k);

      ppw->approx[ppw->index_ap_tca] = trigger_on ? (int)tca_on : (int)tca_off;
    }

    /* radiation streaming approximation, same triggers as for scalars */
    trigger_on = (tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) &&
                 (tau > pth->tau_free_streaming) &&
                 (ppr->radiation_streaming_approximation != rsa_none);

    ppw->approx[ppw->index_ap_rsa] = trigger_on ? (int)rsa_on : (int)rsa_off;
  }

  return _SUCCESS_;
}
/**
* Compute typical timescale over which the perturbation equations
* vary. Some integrators (e.g. Runge-Kutta) benefit from calling this
* routine at each step in order to adapt the next step.
*
* This is one of the few functions in the code which is passed to the generic_integrator() routine.
* Since generic_integrator() should work with functions passed from various modules, the format of the arguments
* is a bit special:
* - fixed parameters and workspaces are passed through a generic pointer.
* generic_integrator() doesn't know the content of this pointer.
* - the error management is a bit special: errors are not written as usual to pth->error_message, but to a generic
* error_message passed in the list of arguments.
*
* @param tau Input: conformal time
* @param parameters_and_workspace Input: fixed parameters (e.g. indices), workspace, approximation used, etc.
* @param timescale Output: perturbation variation timescale (given the approximation used)
* @param error_message Output: error message
*/
int perturb_timescale(
                      double tau,
                      void * parameters_and_workspace,
                      double * timescale,
                      ErrorMsg error_message
                      ) {

  /** Summary: */

  /** - define local variables */

  /* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */
  double tau_k;
  /* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */
  double tau_h;
  /* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */
  double tau_c;

  /* various pointers allowing to extract the fields of the
     parameter_and_workspace input structure */
  struct perturb_parameters_and_workspace * pppaw;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;

  /* booleans: which mode (scalar/vector/tensor) is being integrated */
  int is_scalar;
  int is_vector;
  int is_tensor;

  /** - extract the fields of the parameter_and_workspace input structure */
  pppaw = parameters_and_workspace;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  ppw = pppaw->ppw;
  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;

  /** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */

  class_test(pppaw->k == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  tau_k = 1./pppaw->k;

  /** - evaluate background quantities with background_at_tau() and
      Hubble time scale \f$ \tau_h = a/a' \f$ */

  class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), pvecback),
             pba->error_message,
             error_message);

  class_test(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] == 0.,
             error_message,
             "aH=0, stop to avoid division by zero");

  tau_h = 1./(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]);

  /* the wavenumber index_md selects exactly one of the three modes */
  is_scalar = (ppt->has_scalars == _TRUE_) && (pppaw->index_md == ppt->index_md_scalars);
  is_vector = (ppt->has_vectors == _TRUE_) && (pppaw->index_md == ppt->index_md_vectors);
  is_tensor = (ppt->has_tensors == _TRUE_) && (pppaw->index_md == ppt->index_md_tensors);

  /** - for scalar modes: start from the Hubble time scale, and include the
      mode time scale unless radiation streaming is on and there is no ncdm */

  if (is_scalar) {
    *timescale = tau_h;
    if ((ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) || (pba->has_ncdm == _TRUE_))
      *timescale = MIN(tau_k,*timescale);
  }

  /** - for vector and tensor modes: always include both the Hubble and the
      mode time scales */

  if (is_vector || is_tensor) {
    *timescale = MIN(tau_h,tau_k);
  }

  /** - in all three cases, when the tight-coupling approximation is off and
      recombination is not finished, the recombination time scale
      \f$ \tau_{\gamma} = 1/ \kappa' \f$ must also be included */

  if ((is_scalar || is_vector || is_tensor) &&
      (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   ppw->inter_mode,
                                   &(ppw->last_index_thermo),
                                   pvecback,
                                   pvecthermo),
               pth->error_message,
               error_message);

    if (pvecthermo[pth->index_th_dkappa] != 0.) {

      /** - --> compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

      tau_c = 1./pvecthermo[pth->index_th_dkappa];
      *timescale = MIN(tau_c,*timescale);
    }
  }

  return _SUCCESS_;
}
/**
* Compute metric perturbations (those not integrated over time) using Einstein equations
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param y Input: vector of perturbations (those integrated over time) (already allocated)
* @param ppw Input/Output: in output contains the updated metric perturbations
* @return the error status
*/
int perturb_einstein(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
double k,
double tau,
double * y,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - define local variables */
double k2,a,a2,a_prime_over_a;
double s2_squared;
double shear_g = 0.;
/** - define wavenumber and scale factor related quantities */
k2 = k*k;
a = ppw->pvecback[pba->index_bg_a];
a2 = a * a;
a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a;
/* curvature factor s_2^2 = 1 - 3K/k^2 (equals 1 in a flat universe, K=0) */
s2_squared = 1.-3.*pba->K/k2;
/** - sum up perturbations from all species; this fills ppw->delta_rho,
ppw->rho_plus_p_theta, ppw->rho_plus_p_shear, ppw->delta_p, used below */
class_call(perturb_total_stress_energy(ppr,pba,pth,ppt,index_md,k,y,ppw),
ppt->error_message,
ppt->error_message);
/** - for scalar modes: */
if (_scalars_) {
/** - --> infer metric perturbations from Einstein equations */
/* newtonian gauge */
if (ppt->gauge == newtonian) {
/* in principle we could get phi from the constrain equation:
ppw->pvecmetric[ppw->index_mt_phi] = -1.5 * (a2/k2/k2/s2/s2) * (k2 * delta_rho + 3.*a_prime_over_a * rho_plus_p_theta);
with s2_squared = sqrt(1-3K/k2) = ppw->s_l[2]*ppw->s_l[2]
This was the case in class v1.3. However the integration is
more stable if we treat phi as a dynamical variable
y[ppw->pv->index_pt_phi], which derivative is given by the
second equation below (credits to Guido Walter Pettinari). */
/* equation for psi: psi = phi - 4.5 (a^2/k^2) (rho+p)sigma (anisotropic stress) */
ppw->pvecmetric[ppw->index_mt_psi] = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k2) * ppw->rho_plus_p_shear;
/* equation for phi' */
ppw->pvecmetric[ppw->index_mt_phi_prime] = -a_prime_over_a * ppw->pvecmetric[ppw->index_mt_psi] + 1.5 * (a2/k2) * ppw->rho_plus_p_theta;
/* eventually, infer radiation streaming approximation for
gamma and ur (this is exactly the right place to do it
because the result depends on h_prime) */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
ppt->error_message,
ppt->error_message);
}
}
/* synchronous gauge */
if (ppt->gauge == synchronous) {
/* first equation involving total density fluctuation */
ppw->pvecmetric[ppw->index_mt_h_prime] =
( k2 * s2_squared * y[ppw->pv->index_pt_eta] + 1.5 * a2 * ppw->delta_rho)/(0.5*a_prime_over_a); /* h' */
/* eventually, infer radiation streaming approximation for
gamma and ur (this is exactly the right place to do it
because the result depends on h_prime) */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
ppt->error_message,
ppt->error_message);
/* update total theta given rsa approximation results;
must happen before eta' below, which uses rho_plus_p_theta */
ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*ppw->rsa_theta_g;
if (pba->has_ur == _TRUE_) {
ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*ppw->rsa_theta_ur;
}
}
/* second equation involving total velocity */
ppw->pvecmetric[ppw->index_mt_eta_prime] = (1.5 * a2 * ppw->rho_plus_p_theta + 0.5 * pba->K * ppw->pvecmetric[ppw->index_mt_h_prime])/k2/s2_squared; /* eta' */
/* third equation involving total pressure */
ppw->pvecmetric[ppw->index_mt_h_prime_prime] =
- 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_h_prime]
+ 2. * k2 * s2_squared * y[ppw->pv->index_pt_eta]
- 9. * a2 * ppw->delta_p;
/* alpha = (h'+6eta')/2k^2 */
ppw->pvecmetric[ppw->index_mt_alpha] = (ppw->pvecmetric[ppw->index_mt_h_prime] + 6.*ppw->pvecmetric[ppw->index_mt_eta_prime])/2./k2;
/* eventually, infer first-order tight-coupling approximation for photon
shear, then correct the total shear (in TCA, shear_g was set to zero
in perturb_total_stress_energy() and is restored here, since the
first-order TCA expression needs alpha which was just computed) */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_on) {
shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_g]+k2*ppw->pvecmetric[ppw->index_mt_alpha]);
ppw->rho_plus_p_shear += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g;
}
/* fourth equation involving total shear */
ppw->pvecmetric[ppw->index_mt_alpha_prime] = //TBC
- 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_alpha]
+ y[ppw->pv->index_pt_eta]
- 4.5 * (a2/k2) * ppw->rho_plus_p_shear;
}
/* transform (delta_m, theta_m) of the current gauge into
gauge-independent variables (you could comment this out if you
really want gauge-dependent results) */
if (ppt->has_source_delta_m == _TRUE_) {
ppw->delta_m += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2;
// note: until 2.4.3 there was a typo, the factor was (-2 H'/H) instead
// of (3 aH). There is the same typo in the CLASSgal paper
// 1307.1459v1,v2,v3. It came from a confusion between (1+w_total)
// and (1+w_matter)=1 [the latter is the relevant one here].
//
// note2: at this point this gauge-invariant variable is only
// valid if all matter components are pressureless and
// stable. This relation will be generalized soon to the case
// of decaying dark matter.
}
if (ppt->has_source_delta_cb == _TRUE_) {
ppw->delta_cb += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_cb/k2;//check gauge transformation
}
if (ppt->has_source_theta_m == _TRUE_) {
if (ppt->gauge == synchronous) {
ppw->theta_m += ppw->pvecmetric[ppw->index_mt_alpha]*k2;
}
}
if (ppt->has_source_theta_cb == _TRUE_){
if (ppt->gauge == synchronous) {
ppw->theta_cb += ppw->pvecmetric[ppw->index_mt_alpha]*k2; //check gauge transformation
}
}
}
/** - for vector modes */
if (_vectors_) {
if (ppt->gauge == newtonian) {
ppw->pvecmetric[ppw->index_mt_V_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_V] - 3.*ppw->vector_source_pi/k;
}
if (ppt->gauge == synchronous) {
// assuming vector_source_pi = p_class a^2 pi_T^{(1)} and vector_source_v = (rho_class+p_class)a^2 v^{(1)}
// from Hu and White:
ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi/k2;
// what we suspect:
//ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi;
// if we use the other equation:
//ppw->pvecmetric[ppw->index_mt_hv_prime] = -2./k/ (1.-2.*pba->K/k2) * 3. * ppw->vector_source_v;
}
}
/** - for tensor modes */
if (_tensors_) {
/* single einstein equation for tensor perturbations */
ppw->pvecmetric[ppw->index_mt_gw_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_gwdot]-(k2+2.*pba->K)*y[ppw->pv->index_pt_gw]+ppw->gw_source;
}
return _SUCCESS_;
}
/**
 * Compute the total stress-energy perturbations, summed over all species,
 * and store them in the workspace fields ppw->delta_rho,
 * ppw->rho_plus_p_theta, ppw->rho_plus_p_shear, ppw->delta_p (plus the
 * vector/tensor source terms ppw->vector_source_{pi,v} and ppw->gw_source),
 * taking the currently active approximation schemes (TCA, RSA, ncdm fluid
 * approximation) into account. Also infers the gauge-dependent matter
 * variables delta_m, theta_m, delta_cb, theta_cb when requested.
 *
 * Ordering matters: the scalar field contribution must come after all
 * species with non-zero shear (it uses rho_plus_p_shear through psi), and
 * the fld contribution must come last because the PPF scheme sums over all
 * other species.
 *
 * @param ppr      Input: pointer to precision structure
 * @param pba      Input: pointer to background structure
 * @param pth      Input: pointer to thermodynamics structure
 * @param ppt      Input: pointer to the perturbation structure
 * @param index_md Input: index of mode under consideration (scalar/.../tensor)
 * @param k        Input: wavenumber
 * @param y        Input: vector of perturbations (those integrated over time)
 * @param ppw      Input/Output: workspace; in output contains the total perturbations
 * @return the error status
 */
int perturb_total_stress_energy(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
double k,
double * y,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - define local variables */
double a,a2,a_prime_over_a,k2;
double rho_plus_p_tot=0.;
double delta_g=0.;
double theta_g=0.;
double shear_g=0.;
double delta_ur=0.;
double theta_ur=0.;
double shear_ur=0.;
double rho_delta_ncdm=0.;
double rho_plus_p_theta_ncdm=0.;
double rho_plus_p_shear_ncdm=0.;
double delta_p_ncdm=0.;
double factor;
double rho_plus_p_ncdm;
int index_q,n_ncdm,idx;
double epsilon,q,q2,cg2_ncdm,w_ncdm,rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm;
double rho_m,delta_rho_m,rho_plus_p_m,rho_plus_p_theta_m;
double w_fld,dw_over_da_fld,integral_fld;
double gwncdm;
double rho_relativistic;
double rho_dr_over_f;
double delta_rho_scf, delta_p_scf, psi;
double c_gamma_k_H_square;
double Gamma_prime_plus_a_prime_over_a_Gamma, alpha=0., s2sq=1.;
/** - wavenumber and scale factor related quantities */
a = ppw->pvecback[pba->index_bg_a];
a2 = a * a;
a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a;
k2 = k*k;
/** - for scalar modes */
if (_scalars_) {
/** - --> (a) deal with approximation schemes */
/** - ---> (a.1.) photons */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
/** - ----> (a.1.1.) no approximation */
delta_g = y[ppw->pv->index_pt_delta_g];
theta_g = y[ppw->pv->index_pt_theta_g];
shear_g = y[ppw->pv->index_pt_shear_g];
}
else {
/** - ----> (a.1.2.) radiation streaming approximation */
delta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */
theta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */
shear_g = 0.; /* shear always neglected in radiation streaming approximation */
}
}
else {
/** - ----> (a.1.3.) tight coupling approximation */
delta_g = y[ppw->pv->index_pt_delta_g];
theta_g = y[ppw->pv->index_pt_theta_g];
/* first-order tight-coupling approximation for photon shear */
if (ppt->gauge == newtonian) {
shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_g];
}
else {
shear_g = 0.; /* in the synchronous gauge, the expression of
shear_g (at first-order in a tight-coupling
expansion) is a function of h' and eta'; but h'
and eta' are calculated in perturb_einstein()
as a function of delta_g and theta_g. Hence,
we set shear_g temporarily to zero, and set it
to the right first-order value in
perturb_einstein(), just before using the
Einstein equation for the shear. */
}
}
/** - ---> (a.2.) ur */
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
delta_ur = y[ppw->pv->index_pt_delta_ur];
theta_ur = y[ppw->pv->index_pt_theta_ur];
shear_ur = y[ppw->pv->index_pt_shear_ur];
}
else {
delta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */
theta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */
shear_ur = 0.; /* shear always neglected in free streaming approximation */
}
}
/** - --> (b) compute the total density, velocity and shear perturbations */
/* photon and baryon contribution */
ppw->delta_rho = ppw->pvecback[pba->index_bg_rho_g]*delta_g
+ ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b];
ppw->rho_plus_p_theta = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*theta_g
+ ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b];
ppw->rho_plus_p_shear = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g;
/* delta_p uses c_s^2 = 1/3 for photons and the baryon sound speed cb2 for baryons */
ppw->delta_p = 1./3.*ppw->pvecback[pba->index_bg_rho_g]*delta_g
+ ppw->pvecthermo[pth->index_th_cb2]*ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b];
rho_plus_p_tot = 4./3. * ppw->pvecback[pba->index_bg_rho_g] + ppw->pvecback[pba->index_bg_rho_b];
/* cdm contribution */
if (pba->has_cdm == _TRUE_) {
ppw->delta_rho = ppw->delta_rho + ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm];
/* in the synchronous gauge cdm defines the frame, so theta_cdm = 0 and is not evolved */
if (ppt->gauge == newtonian)
ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm];
rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_cdm];
}
/* dcdm contribution */
if (pba->has_dcdm == _TRUE_) {
ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm];
ppw->rho_plus_p_theta += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm];
rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_dcdm];
}
/* ultra-relativistic decay radiation */
if (pba->has_dr == _TRUE_) {
/* We have delta_rho_dr = rho_dr * F0_dr / f, where F follows the
convention in astro-ph/9907388 and f is defined as
f = rho_dr*a^4/rho_crit_today. In CLASS density units
rho_crit_today = H0^2.
*/
rho_dr_over_f = pow(pba->H0/a2,2);
ppw->delta_rho += rho_dr_over_f*y[ppw->pv->index_pt_F0_dr];
ppw->rho_plus_p_theta += 4./3.*3./4*k*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+1];
ppw->rho_plus_p_shear += 2./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+2];
ppw->delta_p += 1./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr];
rho_plus_p_tot += 4./3. * ppw->pvecback[pba->index_bg_rho_dr];
}
/* ultra-relativistic neutrino/relics contribution */
if (pba->has_ur == _TRUE_) {
ppw->delta_rho = ppw->delta_rho + ppw->pvecback[pba->index_bg_rho_ur]*delta_ur;
ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*theta_ur;
ppw->rho_plus_p_shear = ppw->rho_plus_p_shear + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*shear_ur;
ppw->delta_p += 1./3.*ppw->pvecback[pba->index_bg_rho_ur]*delta_ur;
rho_plus_p_tot += 4./3. * ppw->pvecback[pba->index_bg_rho_ur];
}
/* non-cold dark matter contribution */
if (pba->has_ncdm == _TRUE_) {
idx = ppw->pv->index_pt_psi0_ncdm1;
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on){
// The perturbations are evolved integrated:
// (fluid approximation: y holds delta, theta, shear directly per species)
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_ncdm_bg = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
p_ncdm_bg = ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
pseudo_p_ncdm = ppw->pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm];
rho_plus_p_ncdm = rho_ncdm_bg + p_ncdm_bg;
w_ncdm = p_ncdm_bg/rho_ncdm_bg;
/* effective sound speed of the ncdm fluid, built from w, w' and the pseudo-pressure */
cg2_ncdm = w_ncdm*(1.0-1.0/(3.0+3.0*w_ncdm)*(3.0*w_ncdm-2.0+pseudo_p_ncdm/p_ncdm_bg));
if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {
ppw->delta_ncdm[n_ncdm] = y[idx];
ppw->theta_ncdm[n_ncdm] = y[idx+1];
ppw->shear_ncdm[n_ncdm] = y[idx+2];
}
ppw->delta_rho += rho_ncdm_bg*y[idx];
ppw->rho_plus_p_theta += rho_plus_p_ncdm*y[idx+1];
ppw->rho_plus_p_shear += rho_plus_p_ncdm*y[idx+2];
ppw->delta_p += cg2_ncdm*rho_ncdm_bg*y[idx];
rho_plus_p_tot += rho_plus_p_ncdm;
idx += ppw->pv->l_max_ncdm[n_ncdm]+1;
}
}
else{
// We must integrate to find perturbations:
// (exact treatment: integrate the distribution-function multipoles over momentum q)
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_delta_ncdm = 0.0;
rho_plus_p_theta_ncdm = 0.0;
rho_plus_p_shear_ncdm = 0.0;
delta_p_ncdm = 0.0;
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1];
rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2];
delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
//Jump to next momentum bin:
idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
}
rho_delta_ncdm *= factor;
rho_plus_p_theta_ncdm *= k*factor;
rho_plus_p_shear_ncdm *= 2.0/3.0*factor;
delta_p_ncdm *= factor/3.;
if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {
ppw->delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
ppw->theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
ppw->shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
}
ppw->delta_rho += rho_delta_ncdm;
ppw->rho_plus_p_theta += rho_plus_p_theta_ncdm;
ppw->rho_plus_p_shear += rho_plus_p_shear_ncdm;
ppw->delta_p += delta_p_ncdm;
rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
}
}
}
/* scalar field contribution.
In Newtonian gauge, delta_scf depends on the metric perturbation psi which is inferred
from rho_plus_p_shear. So the contribution from the scalar field must be below all
species with non-zero shear.
*/
if (pba->has_scf == _TRUE_) {
if (ppt->gauge == synchronous){
delta_rho_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
delta_p_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
- ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
}
else{
/* equation for psi */
psi = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k/k) * ppw->rho_plus_p_shear;
delta_rho_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
- 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi);
delta_p_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
- ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
- 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi);
}
ppw->delta_rho += delta_rho_scf;
ppw->rho_plus_p_theta += 1./3.*
k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];
ppw->delta_p += delta_p_scf;
rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_scf]+ppw->pvecback[pba->index_bg_p_scf];
}
/* add your extra species here */
/* fluid contribution */
/************************/
/* For use with CONCEPT */
/************************/
/**
 * Count up total pressure and conformal time derivative of pressure,
 * excluding the fld species. These are used for the PPF formalism of fld.
 */
double p_tot = 0.;
double p_tot_prime = 0.;
if (pba->has_fld == _TRUE_ && pba->use_ppf == _TRUE_) {
/* Photons: p = rho/3, and p' = -3(a'/a)(1+w)p with w = 1/3 */
p_tot += 1./3.*ppw->pvecback[pba->index_bg_rho_g];
p_tot_prime += -3.*a_prime_over_a*(1. + 1./3.)*1./3.
*ppw->pvecback[pba->index_bg_rho_g];
/* Baryons have no pressure */
/* Ultra relativistic species */
if (pba->has_ur == _TRUE_) {
p_tot += 1./3.*ppw->pvecback[pba->index_bg_rho_ur];
p_tot_prime += -3.*a_prime_over_a*(1. + 1./3.)*1./3.
*ppw->pvecback[pba->index_bg_rho_ur];
}
/* Cold dark matter has no pressure */
/* Non-cold dark matter: p' expressed through p and the pseudo-pressure */
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++) {
p_tot += ppw->pvecback[pba->index_bg_p_ncdm1 + n_ncdm];
p_tot_prime += -a_prime_over_a*(5.*ppw->pvecback[pba->index_bg_p_ncdm1 + n_ncdm]
- ppw->pvecback[pba->index_bg_pseudo_p_ncdm1 + n_ncdm]);
}
}
/* Decaying cold dark matter has no pressure */
/* Decay radiation: includes the source term from dcdm decay */
if (pba->has_dr == _TRUE_) {
p_tot += 1./3.*ppw->pvecback[pba->index_bg_rho_dr];
p_tot_prime += -3.*a_prime_over_a*(1. + 1./3.)*1./3.
*ppw->pvecback[pba->index_bg_rho_dr]
+ 1./3.*a*pba->Gamma_dcdm*ppw->pvecback[pba->index_bg_rho_dcdm];
}
/* Importantly, we skip the dark energy fluid */
/* Scalar field */
if (pba->has_scf == _TRUE_) {
p_tot += ppw->pvecback[pba->index_bg_p_scf];
p_tot_prime += -a_prime_over_a/(a*a)*ppw->pvecback[pba->index_bg_phi_prime_scf]
*ppw->pvecback[pba->index_bg_phi_prime_scf]
- 2./3.*ppw->pvecback[pba->index_bg_dV_scf]
*ppw->pvecback[pba->index_bg_phi_prime_scf];
}
/* Lambda has constant pressure */
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
if (pba->has_fld == _TRUE_) {
class_call(background_w_fld(pba,a,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
/************************/
/* For use with CONCEPT */
/************************/
/* conformal-time derivative of w: w' = (dw/da) a' = (dw/da)(a'/a)a */
double w_prime_fld = dw_over_da_fld*a_prime_over_a*a;
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
if (pba->use_ppf == _FALSE_) {
ppw->delta_rho_fld = ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_delta_fld];
ppw->rho_plus_p_theta_fld = (1.+w_fld)*ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_theta_fld];
/************************/
/* For use with CONCEPT */
/************************/
/* Pressure perturbation of fld without PPF,
using the adiabatic sound speed ca2 = w - w'/(3(a'/a)(1+w)) */
double ca2_fld = w_fld - w_prime_fld/(3.*a_prime_over_a*(1. + w_fld));
ppw->delta_p_fld = pba->cs2_fld*ppw->delta_rho_fld
+ (pba->cs2_fld - ca2_fld)*(3.*a_prime_over_a*ppw->rho_plus_p_theta_fld/k2);
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
}
else {
s2sq = ppw->s_l[2]*ppw->s_l[2];
/************************/
/* For use with CONCEPT */
/************************/
/**
 * The computation of Gamma_fld and Gamma_prime_fld becomes unstable
 * at large c_Gamma*k/H. To stabilise the system we set these to zero
 * at some large c_Gamma*k/(aH).
 * As to not introduce discontinuities, we have a smooth transition
 * phase between the untouched values and completely nullified values.
 * This transition is given the shape of an error function in
 * log(c_Gamma*k/(aH)) space. The parameters c_gamma_k_H_square_max_{0|1}
 * specify the borders of the transition.
 * Here we nullify/shrink Gamma_fld only.
 */
double Gamma_fld, Gamma_weight, Gamma_weight_steepness;
double c_gamma_k_H_square_max_0, c_gamma_k_H_square_max_1;
c_gamma_k_H_square_max_0 = 1e+3;
c_gamma_k_H_square_max_1 = 1e+4;
c_gamma_k_H_square = pow(pba->c_gamma_over_c_fld*k/a_prime_over_a, 2)*pba->cs2_fld;
if (c_gamma_k_H_square > c_gamma_k_H_square_max_1){
Gamma_fld = 0.;
} else {
Gamma_fld = y[ppw->pv->index_pt_Gamma_fld];
if (c_gamma_k_H_square > c_gamma_k_H_square_max_0){
Gamma_weight_steepness = 5.; /* 5 results in double precision perfect transition */
Gamma_weight = 0.5*(erf(Gamma_weight_steepness*(
0.5*(log(c_gamma_k_H_square_max_0) + log(c_gamma_k_H_square_max_1))
- log(c_gamma_k_H_square)
)) + 1.);
Gamma_fld *= Gamma_weight;
}
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/************************/
/* For use with CONCEPT */
/************************/
double alpha_prime, X, Y, Z, X_prime, Y_prime, Z_prime;
double rho_plus_p_theta_fld_prime, metric_euler;
double rho_t, rho_t_prime, p_t, p_t_prime, rho_fld, rho_fld_prime, p_fld, p_fld_prime;
double H, H_prime;
double theta_t,theta_t_prime, S, S_prime;
if (ppt->gauge == synchronous) {
alpha = (y[ppw->pv->index_pt_eta] + 1.5*a2/k2/s2sq*(ppw->delta_rho
+ 3.*a_prime_over_a/k2*ppw->rho_plus_p_theta)
- Gamma_fld)/a_prime_over_a;
alpha_prime = -2.*a_prime_over_a*alpha + y[ppw->pv->index_pt_eta]
- 4.5*(a2/k2)*ppw->rho_plus_p_shear;
metric_euler = 0.;
} else {
alpha = 0.;
alpha_prime = 0.;
metric_euler = k2*y[ppw->pv->index_pt_phi] - 4.5*a2*ppw->rho_plus_p_shear;
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
ppw->S_fld = ppw->pvecback[pba->index_bg_rho_fld]*(1.+w_fld)*1.5*a2/k2/a_prime_over_a*
(ppw->rho_plus_p_theta/rho_plus_p_tot+k2*alpha);
// note that the last terms in the ratio do not include fld, that's correct, it's the whole point of the PPF scheme
/************************/
/* For use with CONCEPT */
/************************/
/* Nullify/shrink Gamma_prime_fld as done for Gamma_fld above */
if (c_gamma_k_H_square > c_gamma_k_H_square_max_1){
ppw->Gamma_prime_fld = 0.;
} else {
ppw->Gamma_prime_fld = a_prime_over_a*(ppw->S_fld/(1. + c_gamma_k_H_square)
- (1. + c_gamma_k_H_square)*Gamma_fld);
if (c_gamma_k_H_square > c_gamma_k_H_square_max_0){
ppw->Gamma_prime_fld *= Gamma_weight;
}
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
Gamma_prime_plus_a_prime_over_a_Gamma = ppw->Gamma_prime_fld+a_prime_over_a*Gamma_fld;
// delta and theta of fld, valid in both gauges:
ppw->rho_plus_p_theta_fld = ppw->pvecback[pba->index_bg_rho_fld]*(1.+w_fld)*ppw->rho_plus_p_theta/rho_plus_p_tot-
k2*2./3.*a_prime_over_a/a2/(1+4.5*a2/k2/s2sq*rho_plus_p_tot)*
(ppw->S_fld-Gamma_prime_plus_a_prime_over_a_Gamma/a_prime_over_a);
ppw->delta_rho_fld = -2./3.*k2*s2sq/a2*Gamma_fld-3*a_prime_over_a/k2*ppw->rho_plus_p_theta_fld;
/************************/
/* For use with CONCEPT */
/************************/
/* Compute delta_p_fld from the conformal-time derivative of
(rho+p)theta_fld, using auxiliary variables X, Y, Z, S and their
derivatives. Note: rho_t/p_t exclude the fld species itself. */
rho_t = rho_plus_p_tot - p_tot;
p_t = p_tot;
rho_t_prime = -3.*a_prime_over_a*(rho_t + p_t);
p_t_prime = p_tot_prime;
rho_fld = ppw->pvecback[pba->index_bg_rho_fld];
p_fld = w_fld*rho_fld;
rho_fld_prime = -3.*a_prime_over_a*(rho_fld + p_fld);
p_fld_prime = w_prime_fld*rho_fld - 3.*a_prime_over_a*(1. + w_fld)*p_fld;
H = ppw->pvecback[pba->index_bg_H];
H_prime = ppw->pvecback[pba->index_bg_H_prime];
X = c_gamma_k_H_square;
X_prime = -2.*X*(a_prime_over_a + H_prime/H);
Y = 4.5*a2/k2/s2sq*(rho_t + p_t);
Y_prime = Y*(2.*a_prime_over_a + (rho_t_prime + p_t_prime)/(rho_t + p_t));
Z = 2./3.*k2*H/a;
Z_prime = Z*(H_prime/H - a_prime_over_a);
theta_t = ppw->rho_plus_p_theta/rho_plus_p_tot;
theta_t_prime = -a_prime_over_a*theta_t + (-p_t_prime*theta_t + k2*ppw->delta_p
- k2*ppw->rho_plus_p_shear)/rho_plus_p_tot+metric_euler;
S = ppw->S_fld;
S_prime = -Z_prime/Z*S + 1./Z*(rho_fld_prime + p_fld_prime)*(theta_t + k2*alpha)
+ 1./Z*(rho_fld + p_fld)*(theta_t_prime + k2*alpha_prime);
rho_plus_p_theta_fld_prime = Z_prime*(S - 1./(1. + Y)*(S/(1. + 1./X)
+ Gamma_fld*X))
+ Z*(S_prime + Y_prime/(1. + Y*Y + 2.*Y)*(S/(1. + 1./X)
+ Gamma_fld*X)
- 1./(1. + Y)*(S_prime/(1. + 1./X) + S*X_prime/(1. + X*X + 2.*X)
+ ppw->Gamma_prime_fld*X + Gamma_fld*X_prime))
- k2*alpha_prime*(rho_fld + p_fld) - k2*alpha*(rho_fld_prime + p_fld_prime);
ppw->delta_p_fld = (rho_plus_p_theta_fld_prime
+ 4.*a_prime_over_a*ppw->rho_plus_p_theta_fld - (rho_fld + p_fld)*metric_euler)/k2;
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
}
ppw->delta_rho += ppw->delta_rho_fld;
ppw->rho_plus_p_theta += ppw->rho_plus_p_theta_fld;
/************************/
/* For use with CONCEPT */
/************************/
ppw->delta_p += ppw->delta_p_fld;
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
}
/* don't add species here, add them before the fluid contribution: because of the PPF scheme that one must be the last one! */
/* store delta_m in the current gauge. In perturb_einstein, this
will be transformed later on into the gauge-independent variable
D = delta_m + 3 aH \theta_m/k^2 (see the note on the 2.4.3 typo fix
in perturb_einstein). */
if (ppt->has_source_delta_m == _TRUE_) {
/* include baryons and cold dark matter */
delta_rho_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b];
rho_m = ppw->pvecback[pba->index_bg_rho_b];
if (pba->has_cdm == _TRUE_) {
delta_rho_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm];
rho_m += ppw->pvecback[pba->index_bg_rho_cdm];
}
/* include decaying cold dark matter */
if (pba->has_dcdm == _TRUE_) {
delta_rho_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm];
rho_m += ppw->pvecback[pba->index_bg_rho_dcdm];
}
/* infer delta_cb (baryons+cdm only, before ncdm is added below) */
if (ppt->has_source_delta_cb)
ppw->delta_cb = delta_rho_m/rho_m;
/* include any other species non-relativistic today (like ncdm species) */
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
delta_rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]*ppw->delta_ncdm[n_ncdm];
rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
}
}
/* infer delta_m */
ppw->delta_m = delta_rho_m/rho_m;
}
/* store theta_m in the current gauge. In perturb_einstein, this
will be transformed later on into the gauge-independent variable
Theta . Note that computing theta_m is necessary also if we want
the delta_m source only, because the gauge-invariant delta_m
involves theta_m in the current gauge. */
if ((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_)) {
/* include baryons and cold dark matter */
rho_plus_p_theta_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b];
rho_plus_p_m = ppw->pvecback[pba->index_bg_rho_b];
if (pba->has_cdm == _TRUE_) {
/* in synchronous gauge theta_cdm = 0 by gauge choice, so only add it in newtonian gauge */
if (ppt->gauge == newtonian)
rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm];
rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_cdm];
}
if (pba->has_dcdm == _TRUE_) {
rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm];
rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_dcdm];
}
if ((ppt->has_source_delta_cb == _TRUE_) || (ppt->has_source_theta_cb == _TRUE_))
ppw->theta_cb = rho_plus_p_theta_m/rho_plus_p_m;
/* include any other species non-relativistic today (like ncdm species) */
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_plus_p_theta_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm])*ppw->theta_ncdm[n_ncdm];
rho_plus_p_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
}
}
/* infer theta_m */
ppw->theta_m = rho_plus_p_theta_m/rho_plus_p_m;
}
}
/** - for vector modes */
if (_vectors_) {
ppw->vector_source_pi = 0.;
ppw->vector_source_v = 0.;
/** - --> photon contribution to vector sources: */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
/* NOTE(review): delta_g appears twice in the combination below
(delta_g + 2*delta_g + shear_g); confirm against the intended
Hu & White vector-mode formula -- vector modes are marked
experimental elsewhere in this function. */
ppw->vector_source_v += 4./3.*a2*ppw->pvecback[pba->index_bg_rho_g]
* (-1./4.*_SQRT2_)
* (y[ppw->pv->index_pt_delta_g]+2.*y[ppw->pv->index_pt_delta_g]+y[ppw->pv->index_pt_shear_g]);
ppw->vector_source_pi += 1./3.*a2*ppw->pvecback[pba->index_bg_rho_g]
* (6.*_SQRT2_/5./sqrt(1.-2.*pba->K/k/k))
* (4./3./k*y[ppw->pv->index_pt_theta_g]+y[ppw->pv->index_pt_l3_g]);
}
}
/** - --> baryons */
}
/** - for tensor modes */
if (_tensors_) {
ppw->gw_source = 0.0;
/** - --> photon contribution to gravitational wave source: */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppw->gw_source += (-_SQRT6_*4*a2*ppw->pvecback[pba->index_bg_rho_g]*
(1./15.*y[ppw->pv->index_pt_delta_g]+
4./21.*y[ppw->pv->index_pt_shear_g]+
1./35.*y[ppw->pv->index_pt_l3_g+1]));
}
}
/** - --> ur contribution to gravitational wave source: */
if (ppt->evolve_tensor_ur == _TRUE_){
rho_relativistic = 0.;
if (ppt->tensor_method == tm_exact)
rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur];
if (ppt->tensor_method == tm_massless_approximation) {
if (pba->has_ur == _TRUE_)
rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur];
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++) {
/* (3 p_ncdm1) is the "relativistic" contribution to rho_ncdm1 */
rho_relativistic += 3.*ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
}
}
}
ppw->gw_source += (-_SQRT6_*4*a2*rho_relativistic*
(1./15.*y[ppw->pv->index_pt_delta_ur]+
4./21.*y[ppw->pv->index_pt_shear_ur]+
1./35.*y[ppw->pv->index_pt_l3_ur+1]));
}
/** - --> ncdm contribution to gravitational wave source: */
if (ppt->evolve_tensor_ncdm == _TRUE_){
idx = ppw->pv->index_pt_psi0_ncdm1;
// We must integrate to find perturbations:
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
gwncdm = 0.;
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
gwncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*(1./15.*y[idx]+2./21.*y[idx+2]+1./35.*y[idx+4]);
//Jump to next momentum bin:
idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
}
gwncdm *= -_SQRT6_*4*a2*factor;
ppw->gw_source += gwncdm;
}
}
}
return _SUCCESS_;
}
/**
* Compute the source functions (three terms for temperature, one for
* E or B modes, etc.)
*
* This is one of the few functions in the code which is passed to
* the generic_integrator() routine. Since generic_integrator()
* should work with functions passed from various modules, the format
* of the arguments is a bit special:
*
* - fixed parameters and workspaces are passed through a generic
* pointer. generic_integrator() doesn't know the content of this
* pointer.
*
* - the error management is a bit special: errors are not written as
* usual to pth->error_message, but to a generic error_message passed
* in the list of arguments.
*
* @param tau Input: conformal time
* @param y Input: vector of perturbations
* @param dy Input: vector of time derivative of perturbations
* @param index_tau Input: index in the array tau_sampling
* @param parameters_and_workspace Input/Output: in input, all parameters needed by perturb_derivs, in output, source terms
* @param error_message Output: error message
* @return the error status
*/
int perturb_sources(
                    double tau,
                    double * y,
                    double * dy,
                    int index_tau,
                    void * parameters_and_workspace,
                    ErrorMsg error_message
                    ) {
  /** Summary: */

  /** - define local variables */

  double P;                        /* photon polarization source term (gauge- and approximation-dependent combination of multipoles) */
  int index_type;
  struct perturb_parameters_and_workspace * pppaw;
  struct precision * ppr;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  int index_md;
  int index_ic;
  int index_k;
  double k;
  double z;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;
  double * pvecmetric;
  double delta_g, delta_rho_scf, rho_plus_p_theta_scf;
  double a_prime_over_a=0.;        /* (a'/a); only computed in synchronous gauge, hence pre-initialized */
  double a_prime_over_a_prime=0.;  /* (a'/a)'; only computed in synchronous gauge, hence pre-initialized */
  double w_fld,dw_over_da_fld,integral_fld;
  int switch_isw = 1;              /* 1 = keep the ISW contribution; may be zeroed below depending on user switches and redshift */
  double a_rel, a2_rel, f_dr;

  /** - rename structure fields (just to avoid heavy notations) */

  pppaw = parameters_and_workspace;
  ppr = pppaw->ppr;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  index_md = pppaw->index_md;
  index_ic = pppaw->index_ic;
  index_k = pppaw->index_k;
  k = pppaw->k;
  ppw = pppaw->ppw;

  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;
  pvecmetric = ppw->pvecmetric;

  /** - get background/thermo quantities in this point */

  class_call(background_at_tau(pba,
                               tau,
                               pba->normal_info,
                               pba->inter_closeby,
                               &(ppw->last_index_back),
                               pvecback),
             pba->error_message,
             error_message);

  z = pba->a_today/pvecback[pba->index_bg_a]-1.;

  class_call(thermodynamics_at_z(pba,
                                 pth,
                                 z,  /* redshift z=1/a-1 */
                                 pth->inter_closeby,
                                 &(ppw->last_index_thermo),
                                 pvecback,
                                 pvecthermo),
             pth->error_message,
             error_message);

  /* scale factor relative to today, and its square (used in scf and dr sources below) */
  a_rel = ppw->pvecback[pba->index_bg_a]/pba->a_today;
  a2_rel = a_rel * a_rel;

  /* derived background quantities, useful only in synchronous gauge */
  if (ppt->gauge == synchronous) {

    a_prime_over_a = pvecback[pba->index_bg_a] * pvecback[pba->index_bg_H]; /* (a'/a)=aH */
    a_prime_over_a_prime = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a] + pow(pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a],2); /* (a'/a)' = aH'+(aH)^2 */
  }

  /** - for scalars */
  if (_scalars_) {

    /** - --> compute metric perturbations */

    class_call(perturb_einstein(ppr,
                                pba,
                                pth,
                                ppt,
                                index_md,
                                k,
                                tau,
                                y,
                                ppw),
               ppt->error_message,
               error_message);

    /** - --> compute quantities depending on approximation schemes */

    /* with the radiation streaming approximation on, delta_g comes from the
       workspace and the polarization source vanishes */
    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
      delta_g = ppw->rsa_delta_g;
      P = 0.;
    }
    else {
      delta_g = y[ppw->pv->index_pt_delta_g];

      /* with tight coupling on, all polarization multipoles are slaved to the
         photon shear; otherwise build P from the evolved multipoles */
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_on)
        P = 5.* ppw->s_l[2] * ppw->tca_shear_g/8.; /* (2.5+0.5+2)shear_g/8 */
      else
        P = (y[ppw->pv->index_pt_pol0_g] + y[ppw->pv->index_pt_pol2_g] + 2.* ppw->s_l[2] *y[ppw->pv->index_pt_shear_g])/8.;
    }

    /** - --> for each type, compute source terms */

    /* scalar temperature */
    if (ppt->has_source_t == _TRUE_) {

      /* check whether integrated Sachs-Wolf term should be included:
         the early/late ISW switches act on either side of the user-defined
         split redshift eisw_lisw_split_z */
      if ((ppt->switch_eisw == 0) && (z >= ppt->eisw_lisw_split_z)){
        switch_isw = 0;
      }
      if ((ppt->switch_lisw == 0) && (z < ppt->eisw_lisw_split_z)) {
        switch_isw=0;
      }

      /* newtonian gauge: simplest form, not efficient numerically */
      /*
        if (ppt->gauge == newtonian) {
        _set_source_(ppt->index_tp_t0) = pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_phi_prime] + pvecthermo[pth->index_th_g] * delta_g / 4.;
        _set_source_(ppt->index_tp_t1) = pvecthermo[pth->index_th_exp_m_kappa] * k* pvecmetric[ppw->index_mt_psi] + pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b]/k;
        _set_source_(ppt->index_tp_t2) = pvecthermo[pth->index_th_g] * P;
        }
      */

      /* newtonian gauge: slightly more complicated form, but more efficient numerically */
      /* t0 = SW + ISW + Doppler; each piece is multiplied by its user switch */
      if (ppt->gauge == newtonian) {

        _set_source_(ppt->index_tp_t0) =
          ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g / 4. + pvecmetric[ppw->index_mt_psi])
          + switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_phi]-pvecmetric[ppw->index_mt_psi])
                          + pvecthermo[pth->index_th_exp_m_kappa] * 2. * pvecmetric[ppw->index_mt_phi_prime])
          + ppt->switch_dop /k/k * (pvecthermo[pth->index_th_g] * dy[ppw->pv->index_pt_theta_b]
                                    + pvecthermo[pth->index_th_dg] * y[ppw->pv->index_pt_theta_b]);

        _set_source_(ppt->index_tp_t1) = switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k* (pvecmetric[ppw->index_mt_psi]-y[ppw->pv->index_pt_phi]);

        _set_source_(ppt->index_tp_t2) = ppt->switch_pol * pvecthermo[pth->index_th_g] * P;
      }

      /* synchronous gauge: simplest form, not efficient numerically */
      /*
        if (ppt->gauge == synchronous) {
        _set_source_(ppt->index_tp_t0) = - pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_h_prime] / 6. + pvecthermo[pth->index_th_g] / 4. * delta_g;
        _set_source_(ppt->index_tp_t1) = pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b] / k;
        _set_source_(ppt->index_tp_t2) = pvecthermo[pth->index_th_exp_m_kappa] * k*k* 2./3. * ppw->s_l[2] * pvecmetric[ppw->index_mt_alpha] + pvecthermo[pth->index_th_g] * P;
        }
      */

      /* synchronous gauge: slightly more complicated form, but more efficient numerically */
      /* same SW + ISW + Doppler split as above, expressed through eta and alpha */
      if (ppt->gauge == synchronous) {

        _set_source_(ppt->index_tp_t0) =
          ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g/4. + pvecmetric[ppw->index_mt_alpha_prime])
          + switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_eta]
                                                         - pvecmetric[ppw->index_mt_alpha_prime]
                                                         - 2 * a_prime_over_a * pvecmetric[ppw->index_mt_alpha])
                          + pvecthermo[pth->index_th_exp_m_kappa] * 2. * (pvecmetric[ppw->index_mt_eta_prime]
                                                                          - a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha]
                                                                          - a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime]))
          + ppt->switch_dop * (pvecthermo[pth->index_th_g] * (dy[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha_prime])
                               +pvecthermo[pth->index_th_dg] * (y[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha]));

        _set_source_(ppt->index_tp_t1) =
          switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k * (pvecmetric[ppw->index_mt_alpha_prime]
                                                                    + 2. * a_prime_over_a * pvecmetric[ppw->index_mt_alpha]
                                                                    - y[ppw->pv->index_pt_eta]);

        _set_source_(ppt->index_tp_t2) =
          ppt->switch_pol * pvecthermo[pth->index_th_g] * P;
      }
    }

    /* scalar polarization */
    if (ppt->has_source_p == _TRUE_) {

      /* all gauges. Note that the correct formula for the E source
         should have a minus sign, as shown in Hu & White. We put a
         plus sign to comply with the 'historical convention'
         established in CMBFAST and CAMB. */
      _set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P;
    }

    /* now, non-CMB sources */

    /* Bardeen potential -PHI_H = phi in Newtonian gauge */
    if (ppt->has_source_phi == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_phi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_eta] - a_prime_over_a * pvecmetric[ppw->index_mt_alpha];
    }

    /* its derivative phi' */
    if (ppt->has_source_phi_prime == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_phi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_eta]
          - a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha]
          - a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime];
    }

    /* diff of Bardeen potentials PHI_A-PHI_H = psi + phi in newtonian gauge */
    if (ppt->has_source_phi_plus_psi == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi_plus_psi) =
          y[ppw->pv->index_pt_phi] + pvecmetric[ppw->index_mt_psi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi_plus_psi) =
          y[ppw->pv->index_pt_eta] + pvecmetric[ppw->index_mt_alpha_prime];
    }

    /* Bardeen potential PHI_A = psi in newtonian gauge */
    if (ppt->has_source_psi == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_psi) =
          pvecmetric[ppw->index_mt_psi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_psi) =
          a_prime_over_a * pvecmetric[ppw->index_mt_alpha] + pvecmetric[ppw->index_mt_alpha_prime];
    }

    /* the metric potentials h and eta in synchronous gauge */
    if (ppt->gauge == synchronous) {
      /* cdm is always on in synchronous gauge, see error message above that checks gauge and has_cdm */
      /* NOTE(review): h is reconstructed here as -2*delta_cdm, i.e. from the
         comoving cdm density contrast — verify against the gauge fixing used
         in perturb_initial_conditions */
      if (ppt->has_source_h == _TRUE_)
        _set_source_(ppt->index_tp_h) = - 2 * y[ppw->pv->index_pt_delta_cdm];
      if (ppt->has_source_h_prime == _TRUE_)
        _set_source_(ppt->index_tp_h_prime) = pvecmetric[ppw->index_mt_h_prime];
      if (ppt->has_source_eta == _TRUE_)
        _set_source_(ppt->index_tp_eta) = y[ppw->pv->index_pt_eta];
      if (ppt->has_source_eta_prime == _TRUE_)
        _set_source_(ppt->index_tp_eta_prime) = dy[ppw->pv->index_pt_eta];
    }

    /* total matter over density (gauge-invariant, defined as in arXiv:1307.1459) */
    if (ppt->has_source_delta_m == _TRUE_) {
      _set_source_(ppt->index_tp_delta_m) = ppw->delta_m;
    }

    /* cdm and baryon over density */
    if (ppt->has_source_delta_cb == _TRUE_) {
      _set_source_(ppt->index_tp_delta_cb) = ppw->delta_cb;
    }

    /* delta_g */
    if (ppt->has_source_delta_g == _TRUE_) {
      _set_source_(ppt->index_tp_delta_g) = delta_g;
    }

    /* delta_baryon */
    if (ppt->has_source_delta_b == _TRUE_) {
      _set_source_(ppt->index_tp_delta_b) = y[ppw->pv->index_pt_delta_b];
    }

    /* delta_cdm */
    if (ppt->has_source_delta_cdm == _TRUE_) {
      _set_source_(ppt->index_tp_delta_cdm) = y[ppw->pv->index_pt_delta_cdm];
    }

    /* delta_dcdm */
    if (ppt->has_source_delta_dcdm == _TRUE_) {
      _set_source_(ppt->index_tp_delta_dcdm) = y[ppw->pv->index_pt_delta_dcdm];
    }

    /* delta_fld */
    if (ppt->has_source_delta_fld == _TRUE_) {
      _set_source_(ppt->index_tp_delta_fld) = ppw->delta_rho_fld/pvecback[pba->index_bg_rho_fld];
    }

    /* delta_scf: perturbed scalar-field energy density, built from the field
       and its momentum; the extra psi term appears only outside synchronous gauge */
    if (ppt->has_source_delta_scf == _TRUE_) {
      if (ppt->gauge == synchronous){
        delta_rho_scf =  1./3.*
          (1./a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
      }
      else{
        delta_rho_scf =  1./3.*
          (1./a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
           - 1./a2_rel*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi]);
      }
      _set_source_(ppt->index_tp_delta_scf) = delta_rho_scf/pvecback[pba->index_bg_rho_scf];
    }

    /* delta_dr: F0_dr is normalized by f_dr = (a^2/H0)^2 * rho_dr */
    if (ppt->has_source_delta_dr == _TRUE_) {
      f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
      _set_source_(ppt->index_tp_delta_dr) = y[ppw->pv->index_pt_F0_dr]/f_dr;
    }

    /* delta_ur */
    if (ppt->has_source_delta_ur == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_delta_ur) = y[ppw->pv->index_pt_delta_ur];
      else
        _set_source_(ppt->index_tp_delta_ur) = ppw->rsa_delta_ur;
    }

    /* delta_ncdm1: one source type per ncdm species, stored contiguously */
    if (ppt->has_source_delta_ncdm == _TRUE_) {
      for (index_type = ppt->index_tp_delta_ncdm1; index_type < ppt->index_tp_delta_ncdm1+pba->N_ncdm; index_type++) {
        _set_source_(index_type) = ppw->delta_ncdm[index_type - ppt->index_tp_delta_ncdm1];
      }
    }

    /* total velocity (gauge-invariant, defined as in arXiv:1307.1459) */
    if (ppt->has_source_theta_m == _TRUE_) {
      _set_source_(ppt->index_tp_theta_m) = ppw->theta_m;
    }

    /* cdm and baryon velocity */
    if (ppt->has_source_theta_cb == _TRUE_) {
      _set_source_(ppt->index_tp_theta_cb) = ppw->theta_cb;
    }

    /* theta_g */
    if (ppt->has_source_theta_g == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_theta_g) = y[ppw->pv->index_pt_theta_g];
      else
        _set_source_(ppt->index_tp_theta_g) = ppw->rsa_theta_g;
    }

    /* theta_baryon */
    if (ppt->has_source_theta_b == _TRUE_) {
      _set_source_(ppt->index_tp_theta_b) = y[ppw->pv->index_pt_theta_b];
    }

    /* theta_cdm */
    if (ppt->has_source_theta_cdm == _TRUE_) {
      _set_source_(ppt->index_tp_theta_cdm) = y[ppw->pv->index_pt_theta_cdm];
    }

    /* theta_dcdm */
    if (ppt->has_source_theta_dcdm == _TRUE_) {
      _set_source_(ppt->index_tp_theta_dcdm) = y[ppw->pv->index_pt_theta_dcdm];
    }

    /* theta_fld: requires the equation of state w_fld(a) to convert
       (rho+p)*theta into theta */
    if (ppt->has_source_theta_fld == _TRUE_) {
      class_call(background_w_fld(pba,a_rel*pba->a_today,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
      _set_source_(ppt->index_tp_theta_fld) = ppw->rho_plus_p_theta_fld/(1.+w_fld)/pvecback[pba->index_bg_rho_fld];
    }

    /* theta_scf */
    if (ppt->has_source_theta_scf == _TRUE_) {
      rho_plus_p_theta_scf = 1./3.*
        k*k/a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];
      _set_source_(ppt->index_tp_theta_scf) = rho_plus_p_theta_scf/
        (pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf]);
    }

    /* theta_dr: built from the first dr multipole F1_dr = F0_dr+1 */
    if (ppt->has_source_theta_dr == _TRUE_) {
      f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
      _set_source_(ppt->index_tp_theta_dr) = 3./4.*k*y[ppw->pv->index_pt_F0_dr+1]/f_dr;
    }

    /* theta_ur */
    if (ppt->has_source_theta_ur == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_theta_ur) = y[ppw->pv->index_pt_theta_ur];
      else
        _set_source_(ppt->index_tp_theta_ur) = ppw->rsa_theta_ur;
    }

    /* theta_ncdm1: one source type per ncdm species, stored contiguously */
    if (ppt->has_source_theta_ncdm == _TRUE_) {
      for (index_type = ppt->index_tp_theta_ncdm1; index_type < ppt->index_tp_theta_ncdm1+pba->N_ncdm; index_type++) {
        _set_source_(index_type) = ppw->theta_ncdm[index_type - ppt->index_tp_theta_ncdm1];
      }
    }
  }

  /** - for tensors */
  if (_tensors_) {

    /** - --> compute quantities depending on approximation schemes */

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {

      if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {

        /* tensor polarization source from the full multipole hierarchy */
        P = -(1./10.*y[ppw->pv->index_pt_delta_g]
              +2./7.*y[ppw->pv->index_pt_shear_g]
              +3./70.*y[ppw->pv->index_pt_delta_g+4]
              -3./5.*y[ppw->pv->index_pt_pol0_g]
              +6./7.*y[ppw->pv->index_pt_pol2_g]
              -3./70.*y[ppw->pv->index_pt_pol0_g+4])
          /sqrt(6.);
      }
      else {
        /* tight-coupling limit: P slaved to the gravitational-wave derivative */
        P = 2./5.*_SQRT6_*y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC
      }
    }
    else {
      P = 0.;
    }

    /* tensor temperature */
    if (ppt->has_source_t == _TRUE_) {
      _set_source_(ppt->index_tp_t2) = - y[ppw->pv->index_pt_gwdot] * pvecthermo[pth->index_th_exp_m_kappa] + pvecthermo[pth->index_th_g] * P;
    }

    /* tensor polarization */
    if (ppt->has_source_p == _TRUE_) {

      /* Note that the correct formula for the polarization source
         should have a minus sign, as shown in Hu & White. We put a
         plus sign to comply with the 'historical convention'
         established in CMBFAST and CAMB. */
      _set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P;
    }
  }

  return _SUCCESS_;
}
/**
* When testing the code or a cosmological model, it can be useful to
* output perturbations at each step of integration (and not just the
* delta's at each source sampling point, which is achieved simply by
* asking for matter transfer functions). Then this function can be
* passed to the generic_evolver routine.
*
* By default, instead of passing this function to generic_evolver,
* one passes a null pointer. Then this function is just not used.
*
* @param tau Input: conformal time
* @param y Input: vector of perturbations
* @param dy Input: vector of its derivatives (already allocated)
* @param parameters_and_workspace Input: fixed parameters (e.g. indices)
* @param error_message Output: error message
*
*/
int perturb_print_variables(double tau,
double * y,
double * dy,
void * parameters_and_workspace,
ErrorMsg error_message
) {
struct perturb_parameters_and_workspace * pppaw;
/** Summary: */
/** - define local variables */
double k;
int index_md;
struct precision * ppr;
struct background * pba;
struct thermo * pth;
struct perturbs * ppt;
struct perturb_workspace * ppw;
double * pvecback;
double * pvecthermo;
double * pvecmetric;
double delta_g,theta_g,shear_g,l4_g,pol0_g,pol1_g,pol2_g,pol4_g;
double delta_b,theta_b;
double delta_cdm=0.,theta_cdm=0.;
double delta_dcdm=0.,theta_dcdm=0.;
double delta_dr=0.,theta_dr=0.,shear_dr=0., f_dr=1.0;
double delta_ur=0.,theta_ur=0.,shear_ur=0.,l4_ur=0.;
double delta_rho_scf=0., rho_plus_p_theta_scf=0.;
double delta_scf=0., theta_scf=0.;
/** - ncdm sector begins */
int n_ncdm;
double *delta_ncdm=NULL, *theta_ncdm=NULL, *shear_ncdm=NULL, *delta_p_over_delta_rho_ncdm=NULL;
double rho_ncdm_bg, p_ncdm_bg, pseudo_p_ncdm, w_ncdm;
double rho_delta_ncdm = 0.0;
double rho_plus_p_theta_ncdm = 0.0;
double rho_plus_p_shear_ncdm = 0.0;
double delta_p_ncdm = 0.0;
double factor = 0.0;
double q,q2,epsilon;
/** - ncdm sector ends */
double phi=0.,psi=0.,alpha=0.;
double phi_prime=0.; //CGT
double delta_temp=0., delta_chi=0.;
double a,a2,H;
int idx,index_q, storeidx;
int index_l;
double *dataptr;
/************************/
/* For use with CONCEPT */
/************************/
/**
* Compute perturbation derivatives. This also ensures that the
* ppw (and other) structs are up-to-date. This is important
* when using the Runge-Kutta evolver, as this is otherwise
* not taken care of correctly.
*/
class_call(
perturb_derivs(tau, y, dy, parameters_and_workspace, error_message),
error_message,
error_message);
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/** - rename structure fields (just to avoid heavy notations) */
pppaw = parameters_and_workspace;
k = pppaw->k;
index_md = pppaw->index_md;
ppr = pppaw->ppr;
pba = pppaw->pba;
pth = pppaw->pth;
ppt = pppaw->ppt;
ppw = pppaw->ppw;
pvecback = ppw->pvecback;
pvecthermo = ppw->pvecthermo;
pvecmetric = ppw->pvecmetric;
/** - update background/thermo quantities in this point */
class_call(background_at_tau(pba,
tau,
pba->normal_info,
pba->inter_closeby,
&(ppw->last_index_back),
pvecback),
pba->error_message,
error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1.,
pth->inter_closeby,
&(ppw->last_index_thermo),
pvecback,
pvecthermo),
pth->error_message,
error_message);
/** - update metric perturbations in this point */
class_call(perturb_einstein(ppr,
pba,
pth,
ppt,
index_md,
k,
tau,
y,
ppw),
ppt->error_message,
error_message);
a = pvecback[pba->index_bg_a];
/************************/
/* For use with CONCEPT */
/************************/
double dlnf0_dlnq;
/* Only return output at late times */
double a_min = 3e-4;
if (a < a_min)
return _SUCCESS_;
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
a2 = a*a;
H = pvecback[pba->index_bg_H];
if (pba->has_ncdm == _TRUE_){
class_alloc(delta_ncdm, sizeof(double)*pba->N_ncdm,error_message);
class_alloc(theta_ncdm, sizeof(double)*pba->N_ncdm,error_message);
class_alloc(shear_ncdm, sizeof(double)*pba->N_ncdm,error_message);
class_alloc(delta_p_over_delta_rho_ncdm, sizeof(double)*pba->N_ncdm,error_message);
}
/** - calculate perturbed recombination */
if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
delta_temp = y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
delta_chi =y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
}
/** - for scalar modes */
if (_scalars_) {
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
delta_g = y[ppw->pv->index_pt_delta_g];
theta_g = y[ppw->pv->index_pt_theta_g];
}
else {
delta_g = ppw->rsa_delta_g;
theta_g = ppw->rsa_theta_g;
}
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_on) {
shear_g = ppw->tca_shear_g;
//l3_g = 6./7.*k/pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g;
pol0_g = 2.5*ppw->tca_shear_g;
pol1_g = 7./12.*6./7.*k/pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g;
pol2_g = 0.5*ppw->tca_shear_g;
//pol3_g = 0.25*6./7.*k/pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g;
}
else {
shear_g = y[ppw->pv->index_pt_shear_g];
//l3_g = y[ppw->pv->index_pt_l3_g];
pol0_g = y[ppw->pv->index_pt_pol0_g];
pol1_g = y[ppw->pv->index_pt_pol1_g];
pol2_g = y[ppw->pv->index_pt_pol2_g];
//pol3_g = y[ppw->pv->index_pt_pol3_g];
}
}
else {
shear_g = 0;
//l3_g = 0;
pol0_g = 0;
pol1_g = 0;
pol2_g = 0;
//pol3_g = 0.;
}
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
delta_ur = y[ppw->pv->index_pt_delta_ur];
theta_ur = y[ppw->pv->index_pt_theta_ur];
shear_ur = y[ppw->pv->index_pt_shear_ur];
}
else {
delta_ur = ppw->rsa_delta_ur;
theta_ur = ppw->rsa_theta_ur;
shear_ur = 0.;
}
}
delta_b = y[ppw->pv->index_pt_delta_b];
theta_b = y[ppw->pv->index_pt_theta_b];
if (pba->has_cdm == _TRUE_) {
delta_cdm = y[ppw->pv->index_pt_delta_cdm];
if (ppt->gauge == synchronous) {
theta_cdm = 0.;
}
else {
theta_cdm = y[ppw->pv->index_pt_theta_cdm];
}
}
/* gravitational potentials */
if (ppt->gauge == synchronous) {
alpha = pvecmetric[ppw->index_mt_alpha];
psi = pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] * alpha + pvecmetric[ppw->index_mt_alpha_prime];
phi = y[ppw->pv->index_pt_eta] - pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
phi_prime = 0.0; //CGT
}
else if (ppt->gauge == newtonian){
psi = pvecmetric[ppw->index_mt_psi];
phi = y[ppw->pv->index_pt_phi];
phi_prime = dy[ppw->pv->index_pt_phi]; //CGT
}
else{
psi = 0.0;
phi = 0.0;
phi_prime = 0.0; //CGT
}
if (pba->has_ncdm == _TRUE_) {
/** - --> Get delta, deltaP/rho, theta, shear and store in array */
idx = ppw->pv->index_pt_psi0_ncdm1;
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on){
// The perturbations are evolved integrated:
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_ncdm_bg = pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
p_ncdm_bg = pvecback[pba->index_bg_p_ncdm1+n_ncdm];
pseudo_p_ncdm = pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm];
w_ncdm = p_ncdm_bg/rho_ncdm_bg;
delta_ncdm[n_ncdm] = y[idx];
theta_ncdm[n_ncdm] = y[idx+1];
shear_ncdm[n_ncdm] = y[idx+2];
//This is the adiabatic sound speed:
delta_p_over_delta_rho_ncdm[n_ncdm] = w_ncdm*(1.0-1.0/(3.0+3.0*w_ncdm)*(3.0*w_ncdm-2.0+pseudo_p_ncdm/p_ncdm_bg));
idx += ppw->pv->l_max_ncdm[n_ncdm]+1;
}
}
else{
// We must integrate to find perturbations:
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_delta_ncdm = 0.0;
rho_plus_p_theta_ncdm = 0.0;
rho_plus_p_shear_ncdm = 0.0;
delta_p_ncdm = 0.0;
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1];
rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2];
delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
//Jump to next momentum bin:
idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
}
rho_delta_ncdm *= factor;
rho_plus_p_theta_ncdm *= k*factor;
rho_plus_p_shear_ncdm *= 2.0/3.0*factor;
delta_p_ncdm *= factor/3.;
delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
delta_p_over_delta_rho_ncdm[n_ncdm] = delta_p_ncdm/rho_delta_ncdm;
}
}
}
if (pba->has_dcdm == _TRUE_) {
delta_dcdm = y[ppw->pv->index_pt_delta_dcdm];
theta_dcdm = y[ppw->pv->index_pt_theta_dcdm];
}
if (pba->has_dr == _TRUE_) {
f_dr = pow(pvecback[pba->index_bg_a]*pvecback[pba->index_bg_a]/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
delta_dr = y[ppw->pv->index_pt_F0_dr]/f_dr;
theta_dr = y[ppw->pv->index_pt_F0_dr+1]*3./4.*k/f_dr;
shear_dr = y[ppw->pv->index_pt_F0_dr+2]*0.5/f_dr;
}
if (pba->has_scf == _TRUE_){
if (ppt->gauge == synchronous){
delta_rho_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
}
else{
delta_rho_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
- 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi]);
}
rho_plus_p_theta_scf = 1./3.*
k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];
delta_scf = delta_rho_scf/pvecback[pba->index_bg_rho_scf];
theta_scf = rho_plus_p_theta_scf/(pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf]);
}
/* converting synchronous variables to newtonian ones */
/************************/
/* For use with CONCEPT */
/************************/
/* Do not convert to Newtonian gauge */
if (0 == 1) { /* (ppt->gauge == synchronous) { */
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/* density and velocity perturbations (comment out if you wish to keep synchronous variables) */
delta_g -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
theta_g += k*k*alpha;
delta_b -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
theta_b += k*k*alpha;
if (pba->has_ur == _TRUE_) {
delta_ur -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
theta_ur += k*k*alpha;
}
if (pba->has_dr == _TRUE_) {
delta_dr += (-4.*a*H+a*pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]/pvecback[pba->index_bg_rho_dr])*alpha;
theta_dr += k*k*alpha;
}
if (pba->has_cdm == _TRUE_) {
delta_cdm -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
theta_cdm += k*k*alpha;
}
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
/** - --> Do gauge transformation of delta, deltaP/rho (?) and theta using -= 3aH(1+w_ncdm) alpha for delta. */
}
}
if (pba->has_dcdm == _TRUE_) {
delta_dcdm += alpha*(-a*pba->Gamma_dcdm-3.*a*H);
theta_dcdm += k*k*alpha;
}
if (pba->has_scf == _TRUE_) {
delta_scf += alpha*(-3.0*H*(1.0+pvecback[pba->index_bg_p_scf]/pvecback[pba->index_bg_rho_scf]));
theta_scf += k*k*alpha;
}
}
// fprintf(ppw->perturb_output_file," ");
/** - --> Handle (re-)allocation */
if (ppt->scalar_perturbations_data[ppw->index_ikout] == NULL){
class_alloc(ppt->scalar_perturbations_data[ppw->index_ikout],
sizeof(double)*ppt->number_of_scalar_titles,
error_message);
ppt->size_scalar_perturbation_data[ppw->index_ikout] = 0;
}
else{
ppt->scalar_perturbations_data[ppw->index_ikout] =
realloc(ppt->scalar_perturbations_data[ppw->index_ikout],
sizeof(double)*(ppt->size_scalar_perturbation_data[ppw->index_ikout]+ppt->number_of_scalar_titles));
}
storeidx = 0;
dataptr = ppt->scalar_perturbations_data[ppw->index_ikout]+
ppt->size_scalar_perturbation_data[ppw->index_ikout];
ppt->size_scalar_perturbation_data[ppw->index_ikout] += ppt->number_of_scalar_titles;
class_store_double(dataptr, tau, _TRUE_, storeidx);
class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx);
class_store_double(dataptr, delta_g, _TRUE_, storeidx);
class_store_double(dataptr, theta_g, _TRUE_, storeidx);
class_store_double(dataptr, shear_g, _TRUE_, storeidx);
class_store_double(dataptr, pol0_g, _TRUE_, storeidx);
class_store_double(dataptr, pol1_g, _TRUE_, storeidx);
class_store_double(dataptr, pol2_g, _TRUE_, storeidx);
class_store_double(dataptr, delta_b, _TRUE_, storeidx);
class_store_double(dataptr, theta_b, _TRUE_, storeidx);
class_store_double(dataptr, psi, _TRUE_, storeidx);
class_store_double(dataptr, phi, _TRUE_, storeidx);
class_store_double(dataptr, phi_prime, _TRUE_, storeidx); // CGT
/* perturbed recombination */
class_store_double(dataptr, delta_temp, ppt->has_perturbed_recombination, storeidx);
class_store_double(dataptr, delta_chi, ppt->has_perturbed_recombination, storeidx);
/* Ultra relativistic species */
class_store_double(dataptr, delta_ur, pba->has_ur, storeidx);
class_store_double(dataptr, theta_ur, pba->has_ur, storeidx);
class_store_double(dataptr, shear_ur, pba->has_ur, storeidx);
/* Cold dark matter */
class_store_double(dataptr, delta_cdm, pba->has_cdm, storeidx);
class_store_double(dataptr, theta_cdm, pba->has_cdm, storeidx);
/* Non-cold Dark Matter */
if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) {
idx = ppw->pv->index_pt_psi0_ncdm1;
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
class_store_double(dataptr, delta_ncdm[n_ncdm], _TRUE_, storeidx);
class_store_double(dataptr, theta_ncdm[n_ncdm], _TRUE_, storeidx);
class_store_double(dataptr, shear_ncdm[n_ncdm], _TRUE_, storeidx);
class_store_double(dataptr, delta_p_over_delta_rho_ncdm[n_ncdm], _TRUE_, storeidx);
/************************/
/* For use with CONCEPT */
/************************/
/* Include ncdm Theta_n_q_l_ncdm[n,q,l] in perturbation output */
class_store_double(dataptr, pba->M_ncdm[n_ncdm], _TRUE_, storeidx);
if (ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on) {
for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
class_store_double(dataptr, 0.0, _TRUE_, storeidx);
}
for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
class_store_double(dataptr, 0.0, _TRUE_, storeidx);
}
for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
for (index_l=0; index_l<=ppw->pv->l_max_ncdm[n_ncdm]; index_l++) {
class_store_double(dataptr, 0.0, _TRUE_, storeidx);
}
}
}
else {
for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
class_store_double(dataptr, pba->dlnf0_dlnq_ncdm[n_ncdm][index_q], _TRUE_, storeidx);
}
for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
class_store_double(dataptr, pba->q_ncdm[n_ncdm][index_q], _TRUE_, storeidx);
}
for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
for (index_l=0; index_l<=ppw->pv->l_max_ncdm[n_ncdm]; index_l++) {
class_store_double(dataptr, -y[idx]/dlnf0_dlnq, _TRUE_, storeidx);
idx++;
/* class_store_double(dataptr, y[idx], _TRUE_, storeidx); */
/* Jump to next momentum bin */
/* idx += (ppw->pv->l_max_ncdm[n_ncdm]+1); */
}
}
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
}
}
/* Decaying cold dark matter */
class_store_double(dataptr, delta_dcdm, pba->has_dcdm, storeidx);
class_store_double(dataptr, theta_dcdm, pba->has_dcdm, storeidx);
/* Decay radiation */
class_store_double(dataptr, delta_dr, pba->has_dr, storeidx);
class_store_double(dataptr, theta_dr, pba->has_dr, storeidx);
class_store_double(dataptr, shear_dr, pba->has_dr, storeidx);
/* Scalar field scf*/
class_store_double(dataptr, delta_scf, pba->has_scf, storeidx);
class_store_double(dataptr, theta_scf, pba->has_scf, storeidx);
//fprintf(ppw->perturb_output_file,"\n");
/************************/
/* For use with CONCEPT */
/************************/
/* Include fld in perturbation output */
double w_fld, dw_over_da_fld, integral_fld, theta_fld;
if (pba->has_fld) {
class_call(background_w_fld(pba, a, &w_fld, &dw_over_da_fld, &integral_fld),
pba->error_message, ppt->error_message);
class_store_double(dataptr, ppw->delta_rho_fld/pvecback[pba->index_bg_rho_fld],
pba->has_fld, storeidx);
/* For w_fld = -1 (Lambda), we have theta = 0 */
if (w_fld == -1.) {
theta_fld = 0.;
}
else {
theta_fld = ppw->rho_plus_p_theta_fld/
((1. + w_fld)*pvecback[pba->index_bg_rho_fld]);
}
class_store_double(dataptr, theta_fld, pba->has_fld, storeidx);
/**
* We choose to store cs2_fld = delta_p_fld/delta_rho_fld rather than
* simply delta_p_fld itself, as is done for massive neutrinos.
*
*/
class_store_double(dataptr,
ppw->delta_p_fld/ppw->delta_rho_fld, pba->has_fld, storeidx);
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/************************/
/* For use with CONCEPT */
/************************/
/* Include theta_tot in perturbation output */
double rho_plus_p_tot = -2./3.*pvecback[pba->index_bg_H_prime]/a + 2./3.*pba->K/(a*a);
double theta_tot = ppw->rho_plus_p_theta/rho_plus_p_tot;
class_store_double(dataptr, theta_tot, _TRUE_, storeidx);
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/************************/
/* For use with CONCEPT */
/************************/
/* Include h_prime in perturbation output */
class_store_double(dataptr, pvecmetric[ppw->index_mt_h_prime],
ppt->gauge == synchronous, storeidx);
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/************************/
/* For use with CONCEPT */
/************************/
/**
* Include H_T_prime (in N-body gauge) in perturbation output.
* Here we make use of rho_plus_p_tot defined earlier.
*/
double p_tot_prime = 0.0;
/* Photons */
p_tot_prime += -3.*a*H*(1. + 1./3.)*1./3.*pvecback[pba->index_bg_rho_g];
/* Baryons have no pressure */
/* Ultra relativistic species */
if (pba->has_ur == _TRUE_)
p_tot_prime += -3.*a*H*(1. + 1./3.)*1./3.*pvecback[pba->index_bg_rho_ur];
/* Cold dark matter has no pressure */
/* Non-cold dark matter */
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++)
p_tot_prime += -a*H*(5.*pvecback[pba->index_bg_p_ncdm1+n_ncdm]
- pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]);
}
/* Decaying cold dark matter has no pressure */
/* Decay radiation */
if (pba->has_dr == _TRUE_)
p_tot_prime += -3.*a*H*(1. + 1./3.)*1./3.*pvecback[pba->index_bg_rho_dr]
+ 1./3.*a*pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm];
/* Dark energy fluid */
if (pba->has_fld == _TRUE_) {
p_tot_prime += a*H*pvecback[pba->index_bg_rho_fld]
*(a*dw_over_da_fld - 3.*w_fld*(1. + w_fld));
}
/* Scalar field */
if (pba->has_scf == _TRUE_) {
p_tot_prime += -H/a*pvecback[pba->index_bg_phi_prime_scf]
*pvecback[pba->index_bg_phi_prime_scf]
- 2./3.*pvecback[pba->index_bg_dV_scf]*pvecback[pba->index_bg_phi_prime_scf];
}
/* Lambda has constant pressure */
double H_T_prime = 3.*a*H/rho_plus_p_tot*(
- ppw->delta_p
+ p_tot_prime*theta_tot/(k*k)
+ ppw->rho_plus_p_shear);
class_store_double(dataptr, H_T_prime, _TRUE_, storeidx);
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
}
/** - for tensor modes: */
if (_tensors_) {
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {
delta_g = y[ppw->pv->index_pt_delta_g];
shear_g = y[ppw->pv->index_pt_shear_g];
l4_g = y[ppw->pv->index_pt_delta_g+4];
pol0_g = y[ppw->pv->index_pt_pol0_g];
pol2_g = y[ppw->pv->index_pt_pol2_g];
pol4_g = y[ppw->pv->index_pt_pol0_g+4];
}
else {
delta_g = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/pvecthermo[pth->index_th_dkappa]; //TBC
shear_g = 0.;
l4_g = 0.;
pol0_g = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/pvecthermo[pth->index_th_dkappa]; //TBC
pol2_g = 0.;
pol4_g = 0.;
}
}
else {
delta_g = 0.;
shear_g = 0.;
l4_g = 0.;
pol0_g = 0.;
pol2_g = 0.;
pol4_g = 0.;
}
if (ppt->evolve_tensor_ur == _TRUE_){
delta_ur = y[ppw->pv->index_pt_delta_ur];
shear_ur = y[ppw->pv->index_pt_shear_ur];
l4_ur = y[ppw->pv->index_pt_delta_ur+4];
}
/** - --> Handle (re-)allocation */
if (ppt->tensor_perturbations_data[ppw->index_ikout] == NULL){
class_alloc(ppt->tensor_perturbations_data[ppw->index_ikout],
sizeof(double)*ppt->number_of_tensor_titles,
error_message);
ppt->size_tensor_perturbation_data[ppw->index_ikout] = 0;
}
else{
ppt->tensor_perturbations_data[ppw->index_ikout] =
realloc(ppt->tensor_perturbations_data[ppw->index_ikout],
sizeof(double)*(ppt->size_tensor_perturbation_data[ppw->index_ikout]+ppt->number_of_tensor_titles));
}
storeidx = 0;
dataptr = ppt->tensor_perturbations_data[ppw->index_ikout]+
ppt->size_tensor_perturbation_data[ppw->index_ikout];
ppt->size_tensor_perturbation_data[ppw->index_ikout] += ppt->number_of_tensor_titles;
//fprintf(ppw->perturb_output_file," ");
class_store_double(dataptr, tau, _TRUE_, storeidx);
class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx);
class_store_double(dataptr, delta_g, _TRUE_, storeidx);
class_store_double(dataptr, shear_g, _TRUE_, storeidx);
class_store_double(dataptr, l4_g, _TRUE_, storeidx);
class_store_double(dataptr, pol0_g, _TRUE_, storeidx);
class_store_double(dataptr, pol2_g, _TRUE_, storeidx);
class_store_double(dataptr, pol4_g, _TRUE_, storeidx);
class_store_double(dataptr, y[ppw->pv->index_pt_gw], _TRUE_, storeidx);
class_store_double(dataptr, y[ppw->pv->index_pt_gwdot], _TRUE_, storeidx);
class_store_double(dataptr, delta_ur, ppt->evolve_tensor_ur, storeidx);
class_store_double(dataptr, shear_ur, ppt->evolve_tensor_ur, storeidx);
class_store_double(dataptr, l4_ur, ppt->evolve_tensor_ur, storeidx);
//printf("index_pt_delta+ur = %d\n",ppw->pv->index_pt_delta_ur);
/* Non-cold Dark Matter */
if (ppt->evolve_tensor_ncdm == _TRUE_) {
idx = ppw->pv->index_pt_psi0_ncdm1;
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_delta_ncdm = 0.0;
rho_plus_p_theta_ncdm = 0.0;
rho_plus_p_shear_ncdm = 0.0;
delta_p_ncdm = 0.0;
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1];
rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2];
delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
//Jump to next momentum bin:
idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
}
rho_delta_ncdm *= factor;
rho_plus_p_theta_ncdm *= k*factor;
rho_plus_p_shear_ncdm *= 2.0/3.0*factor;
delta_p_ncdm *= factor/3.;
delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
class_store_double(dataptr, delta_ncdm[n_ncdm], _TRUE_, storeidx);
class_store_double(dataptr, theta_ncdm[n_ncdm], _TRUE_, storeidx);
class_store_double(dataptr, shear_ncdm[n_ncdm], _TRUE_, storeidx);
}
}
// fprintf(ppw->perturb_output_file,"\n");
}
if (pba->has_ncdm == _TRUE_){
free(delta_ncdm);
free(theta_ncdm);
free(shear_ncdm);
free(delta_p_over_delta_rho_ncdm);
}
return _SUCCESS_;
}
/**
* Compute derivative of all perturbations to be integrated
*
* For each mode (scalar/vector/tensor) and each wavenumber k, this
* function computes the derivative of all values in the vector of
* perturbed variables to be integrated.
*
 * This is one of the few functions in the code which are passed to the generic_integrator() routine.
 * Since generic_integrator() should work with functions passed from various modules, the format of the arguments
 * is a bit special:
 * - fixed parameters and workspaces are passed through a generic pointer.
 *   generic_integrator() doesn't know what the content of this pointer is.
 * - errors are not written as usual to pth->error_message, but to a generic
 *   error_message passed in the list of arguments.
*
* @param tau Input: conformal time
* @param y Input: vector of perturbations
* @param dy Output: vector of its derivatives (already allocated)
* @param parameters_and_workspace Input/Output: in input, fixed parameters (e.g. indices); in output, background and thermo quantities evaluated at tau.
* @param error_message Output: error message
*/
int perturb_derivs(double tau,
double * y,
double * dy,
void * parameters_and_workspace,
ErrorMsg error_message
) {
/** Summary: */
/** - define local variables */
/* multipole */
int l;
/* scale factor and other background quantities */
double a,a2,a_prime_over_a,R;
/* short-cut names for the fields of the input structure */
struct perturb_parameters_and_workspace * pppaw;
double k,k2;
int index_md;
struct precision * ppr;
struct background * pba;
struct thermo * pth;
struct perturbs * ppt;
struct perturb_workspace * ppw;
double * pvecback;
double * pvecthermo;
double * pvecmetric;
double * s_l;
struct perturb_vector * pv;
/* short-cut notations for the perturbations */
double delta_g=0.,theta_g=0.,shear_g=0.;
double delta_b,theta_b;
double cb2,cs2,ca2;
double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_ufa_class=0.;
/* perturbed recombination (just to simplify the notation) */
double H0=0.,Nnow=0.,n_H=0.,fHe=0.;
double delta_temp=0.,delta_chi=0., chi=0.;
double alpha_rec=0.,delta_alpha_rec=0.;
double a_rad=0., Compton_CR =0.;
double Tb_in_K=0.;
/* Non-metric source terms for photons, i.e. \mathcal{P}^{(m)} from arXiv:1305.3261 */
double P0,P1,P2;
/* for use with fluid (fld): */
double w_fld,dw_over_da_fld,w_prime_fld,integral_fld;
/* for use with non-cold dark matter (ncdm): */
int index_q,n_ncdm,idx;
double q,epsilon,dlnf0_dlnq,qk_div_epsilon;
double rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm,w_ncdm,ca2_ncdm,ceff2_ncdm=0.,cvis2_ncdm=0.;
/* for use with curvature */
double cotKgen, sqrt_absK;
double s2_squared, ssqrt3;
/* for use with dcdm and dr */
double f_dr, fprime_dr;
/** - rename the fields of the input structure (just to avoid heavy notations) */
pppaw = parameters_and_workspace;
k = pppaw->k;
k2=k*k;
index_md = pppaw->index_md;
ppr = pppaw->ppr;
pba = pppaw->pba;
pth = pppaw->pth;
ppt = pppaw->ppt;
ppw = pppaw->ppw;
s_l = ppw->s_l;
pvecback = ppw->pvecback;
pvecthermo = ppw->pvecthermo;
pvecmetric = ppw->pvecmetric;
pv = ppw->pv;
/** - get background/thermo quantities in this point */
class_call(background_at_tau(pba,
tau,
pba->normal_info,
pba->inter_closeby,
&(ppw->last_index_back),
pvecback),
pba->error_message,
error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_closeby,
&(ppw->last_index_thermo),
pvecback,
pvecthermo),
pth->error_message,
error_message);
/** - get metric perturbations with perturb_einstein() */
class_call(perturb_einstein(ppr,
pba,
pth,
ppt,
index_md,
k,
tau,
y,
ppw),
ppt->error_message,
error_message);
/** - compute related background quantities */
a = pvecback[pba->index_bg_a];
a2 = a*a;
a_prime_over_a = pvecback[pba->index_bg_H] * a;
R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b];
/** - Compute 'generalised cotK function of argument \f$ \sqrt{|K|}*\tau \f$, for closing hierarchy.
(see equation 2.34 in arXiv:1305.3261): */
if (pba->has_curvature == _FALSE_){
cotKgen = 1.0/(k*tau);
}
else{
sqrt_absK = sqrt(fabs(pba->K));
if (pba->K < 0)
cotKgen = sqrt_absK/k/tanh(sqrt_absK*tau);
else
cotKgen = sqrt_absK/k/tan(sqrt_absK*tau);
}
s2_squared = 1.-3.*pba->K/k2;
/** - for scalar modes: */
if (_scalars_) {
/** - --> (a) define short-cut notations for the scalar perturbations */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
}
delta_b = y[pv->index_pt_delta_b];
theta_b = y[pv->index_pt_theta_b];
cb2 = pvecthermo[pth->index_th_cb2];
/** - --> (b) perturbed recombination **/
if ((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca]==(int)tca_off)){
delta_temp= y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
delta_chi= y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
chi=pvecthermo[pth->index_th_xe];
// Conversion of H0 in inverse seconds (pba->H0 is [H0/c] in inverse Mpcs)
H0 = pba->H0 * _c_ / _Mpc_over_m_;
//Computation of Nnow in SI units
Nnow = 3.*H0*H0*pba->Omega0_b*(1.-pth->YHe)/(8.*_PI_*_G_*_m_H_);
// total amount of hydrogen today
n_H = (pba->a_today/a)*(pba->a_today/a)*(pba->a_today/a)* Nnow;
// Helium-to-hydrogen ratio
fHe = pth->YHe / (_not4_*(1-pth->YHe));
// The constant such that rho_gamma = a_rad * T^4
a_rad = 8./15.*pow(_PI_,5)*pow(_k_B_,4)/pow(_c_*_h_P_,3);
// Compton cooling rate in Mpc^(-1)
Compton_CR = 8./3. *_sigma_ * a_rad /(_m_e_ * _c_ *_c_) *_Mpc_over_m_ ;
// Temperature is already in Kelvin
Tb_in_K = pvecthermo[pth->index_th_Tb];
// Alpha in m^3/s, cf. Recfast paper
alpha_rec = 1.14 * 4.309e-19*pow((Tb_in_K * 1e-4),-0.6166)/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) ;
// delta alpha, dimensionless
delta_alpha_rec= (-0.6166 + 0.6703 * pow((Tb_in_K * 1e-4),0.53)*(-0.6166-0.53))/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) * delta_temp;
} // end of perturbed recombination related quantities
/** - --> (c) compute metric-related quantities (depending on gauge; additional gauges can be coded below)
- Each continuity equation contains a term in (theta+metric_continuity) with
metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge
- Each Euler equation contains a source term metric_euler with
metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge
- Each shear derivative equation contains a source term metric_shear equal to
metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge
- metric_shear_prime is the derivative of metric_shear
- In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous gauge,
(-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the later by (-6 phi_prime) */
if (ppt->gauge == synchronous) {
metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.;
metric_euler = 0.;
metric_shear = k2 * pvecmetric[ppw->index_mt_alpha];
//metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime];
metric_ufa_class = pvecmetric[ppw->index_mt_h_prime]/2.;
}
if (ppt->gauge == newtonian) {
metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime];
metric_euler = k2*pvecmetric[ppw->index_mt_psi];
metric_shear = 0.;
//metric_shear_prime = 0.;
metric_ufa_class = -6.*pvecmetric[ppw->index_mt_phi_prime];
}
/** - --> (d) if some approximation schemes are turned on, enforce a few y[] values computed in perturb_einstein */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
delta_g = ppw->rsa_delta_g;
theta_g = ppw->rsa_theta_g;
}
/** - --> (e) BEGINNING OF ACTUAL SYSTEM OF EQUATIONS OF EVOLUTION */
/* Note concerning perturbed recombination: $cb2*delta_b$ must be replaced everywhere by $cb2*(delta_b+delta_temp)$. If perturbed recombination is not required, delta_temp is equal to zero. */
/** - ---> photon temperature density */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
dy[pv->index_pt_delta_g] = -4./3.*(theta_g+metric_continuity);
}
/** - ---> baryon density */
dy[pv->index_pt_delta_b] = -(theta_b+metric_continuity);
/** - ---> baryon velocity (depends on tight-coupling approximation=tca) */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/* without tca */
/** - ----> perturbed recombination has an impact **/
dy[pv->index_pt_theta_b] =
- a_prime_over_a*theta_b
+ metric_euler
+ k2*cb2*(delta_b+delta_temp)
+ R*pvecthermo[pth->index_th_dkappa]*(theta_g-theta_b);
}
else {
/* with tca */
class_call(perturb_tca_slip_and_shear(y,pppaw,error_message),
error_message,
error_message);
/* perturbed recombination has an impact **/
dy[pv->index_pt_theta_b] =
(-a_prime_over_a*theta_b
+k2*(cb2*(delta_b+delta_temp)+R*(delta_g/4.-s2_squared*ppw->tca_shear_g))
+R*ppw->tca_slip)/(1.+R)
+metric_euler;
}
/** - ---> photon temperature higher momenta and photon polarization (depend on tight-coupling approximation) */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
/** - ----> if photon tight-coupling is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/** - -----> define \f$ \Pi = G_{\gamma 0} + G_{\gamma 2} + F_{\gamma 2} \f$ */
P0 = (y[pv->index_pt_pol0_g] + y[pv->index_pt_pol2_g] + 2.*s_l[2]*y[pv->index_pt_shear_g])/8.;
/** - -----> photon temperature velocity */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s2_squared*y[pv->index_pt_shear_g])
+ metric_euler
+ pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g);
/** - -----> photon temperature shear */
dy[pv->index_pt_shear_g] =
0.5*(8./15.*(theta_g+metric_shear)
-3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_l3_g]
-pvecthermo[pth->index_th_dkappa]*(2.*y[pv->index_pt_shear_g]-4./5./s_l[2]*P0));
/** - -----> photon temperature l=3 */
l = 3;
dy[pv->index_pt_l3_g] = k/(2.0*l+1.0)*
(l*s_l[l]*2.*s_l[2]*y[pv->index_pt_shear_g]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_g+1])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];
/** - -----> photon temperature l>3 */
for (l = 4; l < pv->l_max_g; l++) {
dy[pv->index_pt_delta_g+l] = k/(2.0*l+1.0)*
(l*s_l[l]*y[pv->index_pt_delta_g+l-1]-(l+1)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
}
/** - -----> photon temperature lmax */
l = pv->l_max_g; /* l=lmax */
dy[pv->index_pt_delta_g+l] =
k*(s_l[l]*y[pv->index_pt_delta_g+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/** - -----> photon polarization l=0 */
dy[pv->index_pt_pol0_g] =
-k*y[pv->index_pt_pol0_g+1]
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-4.*P0);
/** - -----> photon polarization l=1 */
dy[pv->index_pt_pol1_g] =
k/3.*(y[pv->index_pt_pol1_g-1]-2.*s_l[2]*y[pv->index_pt_pol1_g+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol1_g];
/** - -----> photon polarization l=2 */
dy[pv->index_pt_pol2_g] =
k/5.*(2.*s_l[2]*y[pv->index_pt_pol2_g-1]-3.*s_l[3]*y[pv->index_pt_pol2_g+1])
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol2_g]-4./5.*P0);
/** - -----> photon polarization l>2 */
for (l=3; l < pv->l_max_pol_g; l++)
dy[pv->index_pt_pol0_g+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/** - -----> photon polarization lmax_pol */
l = pv->l_max_pol_g;
dy[pv->index_pt_pol0_g+l] =
k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1)*cotKgen*y[pv->index_pt_pol0_g+l])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
}
/** - ----> if photon tight-coupling is on: */
else {
/** - -----> in that case, only need photon velocity */
/* perturbed recombination has an impact **/
dy[pv->index_pt_theta_g] =
-(dy[pv->index_pt_theta_b]+a_prime_over_a*theta_b-cb2*k2*(delta_b+delta_temp))/R
+k2*(0.25*delta_g-s2_squared*ppw->tca_shear_g)+(1.+R)/R*metric_euler;
}
}
/** - ---> cdm */
if (pba->has_cdm == _TRUE_) {
/** - ----> newtonian gauge: cdm density and velocity */
if (ppt->gauge == newtonian) {
dy[pv->index_pt_delta_cdm] = -(y[pv->index_pt_theta_cdm]+metric_continuity); /* cdm density */
dy[pv->index_pt_theta_cdm] = - a_prime_over_a*y[pv->index_pt_theta_cdm] + metric_euler; /* cdm velocity */
}
/** - ----> synchronous gauge: cdm density only (velocity set to zero by definition of the gauge) */
if (ppt->gauge == synchronous) {
dy[pv->index_pt_delta_cdm] = -metric_continuity; /* cdm density */
}
}
/* perturbed recombination */
/* computes the derivatives of delta x_e and delta T_b */
if((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca] == (int)tca_off)){
// alpha * n_H is in inverse seconds, so we have to multiply it by Mpc_in_sec
dy[ppw->pv->index_pt_perturbed_recombination_delta_chi] = - alpha_rec* a * chi*n_H *(delta_alpha_rec + delta_chi + delta_b) * _Mpc_over_m_ / _c_ ;
// see the documentation for this formula
dy[ppw->pv->index_pt_perturbed_recombination_delta_temp] = 2./3. * dy[ppw->pv->index_pt_delta_b] - a * Compton_CR * pow(pba->T_cmb/a, 4) * chi / (1.+chi+fHe) * ( (1.-pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb])*(delta_g + delta_chi*(1.+fHe)/(1.+chi+fHe)) + pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb] *(delta_temp - 1./4. * delta_g) );
}
/** - ---> dcdm and dr */
if (pba->has_dcdm == _TRUE_) {
/** - ----> dcdm */
dy[pv->index_pt_delta_dcdm] = -(y[pv->index_pt_theta_dcdm]+metric_continuity)
- a * pba->Gamma_dcdm / k2 * metric_euler; /* dcdm density */
dy[pv->index_pt_theta_dcdm] = - a_prime_over_a*y[pv->index_pt_theta_dcdm] + metric_euler; /* dcdm velocity */
}
/** - ---> dr */
if ((pba->has_dcdm == _TRUE_)&&(pba->has_dr == _TRUE_)) {
/* f = rho_dr*a^4/rho_crit_today. In CLASS density units
rho_crit_today = H0^2.
*/
f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
fprime_dr = pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]*pow(a,5)/pow(pba->H0,2);
/** - ----> dr F0 */
dy[pv->index_pt_F0_dr] = -k*y[pv->index_pt_F0_dr+1]-4./3.*metric_continuity*f_dr+
fprime_dr*(y[pv->index_pt_delta_dcdm]+metric_euler/k2);
/** - ----> dr F1 */
dy[pv->index_pt_F0_dr+1] = k/3.*y[pv->index_pt_F0_dr]-2./3.*k*y[pv->index_pt_F0_dr+2]*s2_squared +
4*metric_euler/(3.*k)*f_dr + fprime_dr/k*y[pv->index_pt_theta_dcdm];
/** - ----> exact dr F2 */
dy[pv->index_pt_F0_dr+2] = 8./15.*(3./4.*k*y[pv->index_pt_F0_dr+1]+metric_shear*f_dr) -3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_F0_dr+3];
/** - ----> exact dr l=3 */
l = 3;
dy[pv->index_pt_F0_dr+3] = k/(2.*l+1.)*
(l*s_l[l]*s_l[2]*y[pv->index_pt_F0_dr+2]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+4]);
/** - ----> exact dr l>3 */
for (l = 4; l < pv->l_max_dr; l++) {
dy[pv->index_pt_F0_dr+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_F0_dr+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+l+1]);
}
/** - ----> exact dr lmax_dr */
l = pv->l_max_dr;
dy[pv->index_pt_F0_dr+l] =
k*(s_l[l]*y[pv->index_pt_F0_dr+l-1]-(1.+l)*cotKgen*y[pv->index_pt_F0_dr+l]);
}
/** - ---> fluid (fld) */
if (pba->has_fld == _TRUE_) {
if (pba->use_ppf == _FALSE_){
/** - ----> factors w, w_prime, adiabatic sound speed ca2 (all three background-related),
plus actual sound speed in the fluid rest frame cs2 */
class_call(background_w_fld(pba,a,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
w_prime_fld = dw_over_da_fld * a_prime_over_a * a;
ca2 = w_fld - w_prime_fld / 3. / (1.+w_fld) / a_prime_over_a;
cs2 = pba->cs2_fld;
/** - ----> fluid density */
dy[pv->index_pt_delta_fld] =
-(1+w_fld)*(y[pv->index_pt_theta_fld]+metric_continuity)
-3.*(cs2-w_fld)*a_prime_over_a*y[pv->index_pt_delta_fld]
-9.*(1+w_fld)*(cs2-ca2)*a_prime_over_a*a_prime_over_a*y[pv->index_pt_theta_fld]/k2;
/** - ----> fluid velocity */
dy[pv->index_pt_theta_fld] = /* fluid velocity */
-(1.-3.*cs2)*a_prime_over_a*y[pv->index_pt_theta_fld]
+cs2*k2/(1.+w_fld)*y[pv->index_pt_delta_fld]
+metric_euler;
}
else {
dy[pv->index_pt_Gamma_fld] = ppw->Gamma_prime_fld; /* Gamma variable of PPF formalism */
}
}
/** - ---> scalar field (scf) */
if (pba->has_scf == _TRUE_) {
/** - ----> field value */
dy[pv->index_pt_phi_scf] = y[pv->index_pt_phi_prime_scf];
/** - ----> Klein Gordon equation */
dy[pv->index_pt_phi_prime_scf] = - 2.*a_prime_over_a*y[pv->index_pt_phi_prime_scf]
- metric_continuity*pvecback[pba->index_bg_phi_prime_scf] // metric_continuity = h'/2
- (k2 + a2*pvecback[pba->index_bg_ddV_scf])*y[pv->index_pt_phi_scf]; //checked
}
/** - ---> ultra-relativistic neutrino/relics (ur) */
if (pba->has_ur == _TRUE_) {
/** - ----> if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
/** - -----> ur density */
dy[pv->index_pt_delta_ur] =
// standard term
-4./3.*(y[pv->index_pt_theta_ur] + metric_continuity)
// non-standard term, non-zero if if ceff2_ur not 1/3
+(1.-ppt->three_ceff2_ur)*a_prime_over_a*(y[pv->index_pt_delta_ur] + 4.*a_prime_over_a*y[pv->index_pt_theta_ur]/k/k);
/** - -----> ur velocity */
dy[pv->index_pt_theta_ur] =
// standard term with extra coefficient (3 ceff2_ur), normally equal to one
k2*(ppt->three_ceff2_ur*y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]) + metric_euler
// non-standard term, non-zero if ceff2_ur not 1/3
-(1.-ppt->three_ceff2_ur)*a_prime_over_a*y[pv->index_pt_theta_ur];
if(ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
/** - -----> exact ur shear */
dy[pv->index_pt_shear_ur] =
0.5*(
// standard term
8./15.*(y[pv->index_pt_theta_ur]+metric_shear)-3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1]
// non-standard term, non-zero if cvis2_ur not 1/3
-(1.-ppt->three_cvis2_ur)*(8./15.*(y[pv->index_pt_theta_ur]+metric_shear)));
/** - -----> exact ur l=3 */
l = 3;
dy[pv->index_pt_l3_ur] = k/(2.*l+1.)*
(l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]);
/** - -----> exact ur l>3 */
for (l = 4; l < pv->l_max_ur; l++) {
dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]);
}
/** - -----> exact ur lmax_ur */
l = pv->l_max_ur;
dy[pv->index_pt_delta_ur+l] =
k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]);
}
else {
/** - -----> in fluid approximation (ufa): only ur shear needed */
//TBC: curvature?
/* a la Ma & Bertschinger */
if (ppr->ur_fluid_approximation == ufa_mb) {
dy[pv->index_pt_shear_ur] =
-3./tau*y[pv->index_pt_shear_ur]
+2./3.*(y[pv->index_pt_theta_ur]+metric_shear);
}
/* a la Hu */
if (ppr->ur_fluid_approximation == ufa_hu) {
dy[pv->index_pt_shear_ur] =
-3.*a_prime_over_a*y[pv->index_pt_shear_ur]
+2./3.*(y[pv->index_pt_theta_ur]+metric_shear);
}
/* a la CLASS */
if (ppr->ur_fluid_approximation == ufa_CLASS) {
dy[pv->index_pt_shear_ur] =
-3./tau*y[pv->index_pt_shear_ur]
+2./3.*(y[pv->index_pt_theta_ur]+metric_ufa_class);
}
}
}
}
/** - ---> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. */
//TBC: curvature in all ncdm
if (pba->has_ncdm == _TRUE_) {
idx = pv->index_pt_psi0_ncdm1;
/** - ----> first case: use a fluid approximation (ncdmfa) */
//TBC: curvature
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on) {
/** - -----> loop over species */
for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {
/** - -----> define intermediate quantitites */
rho_ncdm_bg = pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; /* background density */
p_ncdm_bg = pvecback[pba->index_bg_p_ncdm1+n_ncdm]; /* background pressure */
pseudo_p_ncdm = pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]; /* pseudo-pressure (see CLASS IV paper) */
w_ncdm = p_ncdm_bg/rho_ncdm_bg; /* equation of state parameter */
ca2_ncdm = w_ncdm/3.0/(1.0+w_ncdm)*(5.0-pseudo_p_ncdm/p_ncdm_bg); /* adiabatic sound speed */
/* c_eff is (delta p / delta rho) in the gauge under
consideration (not in the gauge comoving with the
fluid) */
/* c_vis is introduced in order to close the system */
/* different ansatz for sound speed c_eff and viscosity speed c_vis */
if (ppr->ncdm_fluid_approximation == ncdmfa_mb) {
ceff2_ncdm = ca2_ncdm;
cvis2_ncdm = 3.*w_ncdm*ca2_ncdm;
}
if (ppr->ncdm_fluid_approximation == ncdmfa_hu) {
ceff2_ncdm = ca2_ncdm;
cvis2_ncdm = w_ncdm;
}
if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) {
ceff2_ncdm = ca2_ncdm;
cvis2_ncdm = 3.*w_ncdm*ca2_ncdm;
}
/** - -----> exact continuity equation */
dy[idx] = -(1.0+w_ncdm)*(y[idx+1]+metric_continuity)-
3.0*a_prime_over_a*(ceff2_ncdm-w_ncdm)*y[idx];
/** - -----> exact euler equation */
dy[idx+1] = -a_prime_over_a*(1.0-3.0*ca2_ncdm)*y[idx+1]+
ceff2_ncdm/(1.0+w_ncdm)*k2*y[idx]-k2*y[idx+2]
+ metric_euler;
/** - -----> different ansatz for approximate shear derivative */
if (ppr->ncdm_fluid_approximation == ncdmfa_mb) {
dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2]
+8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear);
}
if (ppr->ncdm_fluid_approximation == ncdmfa_hu) {
dy[idx+2] = -3.0*a_prime_over_a*ca2_ncdm/w_ncdm*y[idx+2]
+8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear);
}
if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) {
dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2]
+8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_ufa_class);
}
/** - -----> jump to next species */
idx += pv->l_max_ncdm[n_ncdm]+1;
}
}
/** - ----> second case: use exact equation (Boltzmann hierarchy on momentum grid) */
else {
/** - -----> loop over species */
for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {
/** - -----> loop over momentum */
for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) {
/** - -----> define intermediate quantities */
dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
q = pba->q_ncdm[n_ncdm][index_q];
epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
qk_div_epsilon = k*q/epsilon;
/** - -----> ncdm density for given momentum bin */
dy[idx] = -qk_div_epsilon*y[idx+1]+metric_continuity*dlnf0_dlnq/3.;
/** - -----> ncdm velocity for given momentum bin */
dy[idx+1] = qk_div_epsilon/3.0*(y[idx] - 2*s_l[2]*y[idx+2])
-epsilon*metric_euler/(3*q*k)*dlnf0_dlnq;
/** - -----> ncdm shear for given momentum bin */
dy[idx+2] = qk_div_epsilon/5.0*(2*s_l[2]*y[idx+1]-3.*s_l[3]*y[idx+3])
-s_l[2]*metric_shear*2./15.*dlnf0_dlnq;
/** - -----> ncdm l>3 for given momentum bin */
for(l=3; l<pv->l_max_ncdm[n_ncdm]; l++){
dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]);
}
/** - -----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger)
but with curvature taken into account a la arXiv:1305.3261 */
dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l];
/** - -----> jump to next momentum bin or species */
idx += (pv->l_max_ncdm[n_ncdm]+1);
}
}
}
}
/** - ---> metric */
/** - ---> eta of synchronous gauge */
if (ppt->gauge == synchronous) {
dy[pv->index_pt_eta] = pvecmetric[ppw->index_mt_eta_prime];
}
if (ppt->gauge == newtonian) {
dy[pv->index_pt_phi] = pvecmetric[ppw->index_mt_phi_prime];
}
}
/** - vector mode */
if (_vectors_) {
fprintf(stderr,"we are in vectors\n");
ssqrt3 = sqrt(1.-2.*pba->K/k2);
cb2 = pvecthermo[pth->index_th_cb2];
/** - --> baryon velocity */
if (ppt->gauge == synchronous) {
dy[pv->index_pt_theta_b] = -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b]
- pvecthermo[pth->index_th_dkappa]*(_SQRT2_/4.*delta_g + y[pv->index_pt_theta_b]);
}
else if (ppt->gauge == newtonian) {
dy[pv->index_pt_theta_b] = -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b]
- _SQRT2_/4.*pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b])
+ pvecmetric[ppw->index_mt_V_prime]+(1.-3.*cb2)*a_prime_over_a*y[pv->index_pt_V];
}
/*
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {
*/
/* short-cut notations for the tensor perturbations */
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
shear_g = y[pv->index_pt_shear_g];
/* (P^{(1)}) (see Eq. B.23 in 1305.3261)*/
P1 = -_SQRT6_/40.*(
4./(3.*k)*theta_g //F1
+y[pv->index_pt_delta_g+3]
+2.*y[pv->index_pt_pol0_g]
+10./7.*y[pv->index_pt_pol2_g]
-4./7.*y[pv->index_pt_pol0_g+4]);
if (ppt->gauge == synchronous) {
/* photon density (delta_g = F_0) */
dy[pv->index_pt_delta_g] =
-4./3.*theta_g
-pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b]);
/* photon velocity (theta_g = (3k/4)*F_1) */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s_l[2]*shear_g)
-pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1)
+4.0/(3.0*_SQRT2_)*ssqrt3*y[pv->index_pt_hv_prime];
}
else if (ppt->gauge == newtonian) {
/* photon density (delta_g = F_0) */
dy[pv->index_pt_delta_g] =
-4./3.*theta_g
-pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b])
-2.*_SQRT2_*pvecmetric[ppw->index_mt_V_prime];
/* photon velocity (theta_g = (3k/4)*F_1) */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s_l[2]*shear_g)
-pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1);
}
/* photon shear (shear_g = F_2/2) */
dy[pv->index_pt_shear_g] =
4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1]
-pvecthermo[pth->index_th_dkappa]*shear_g;
/* photon l=3 */
dy[pv->index_pt_l3_g] =
k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=4; l < pv->l_max_g; l++)
dy[pv->index_pt_delta_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* l=lmax */
l = pv->l_max_g;
dy[pv->index_pt_delta_g+l] =
k*(s_l[l]*y[pv->index_pt_delta_g+l-1]
-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* photon polarization, l=0 (pol0_g = G_0)*/
dy[pv->index_pt_pol0_g] =
-k*y[pv->index_pt_pol0_g+1]
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P1);
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=1; l < pv->l_max_pol_g; l++)
dy[pv->index_pt_pol0_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/* l=lmax */
l = pv->l_max_pol_g;
dy[pv->index_pt_pol0_g+l] =
k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/*
}
}
*/
if (ppt->gauge == synchronous) {
/* Vector metric perturbation in synchronous gauge: */
dy[pv->index_pt_hv_prime] = pvecmetric[ppw->index_mt_hv_prime_prime];
}
else if (ppt->gauge == newtonian){
/* Vector metric perturbation in Newtonian gauge: */
dy[pv->index_pt_V] = pvecmetric[ppw->index_mt_V_prime];
}
}
/** - tensor modes: */
if (_tensors_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {
/* short-cut notations for the tensor perturbations */
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
shear_g = y[pv->index_pt_shear_g];
/* (P^{(2)}) */
P2 =-1.0/_SQRT6_*(
1./10.*delta_g
+2./7.*shear_g
+3./70.*y[pv->index_pt_delta_g+4]
-3./5.*y[pv->index_pt_pol0_g]
+6./7.*y[pv->index_pt_pol2_g]
-3./70.*y[pv->index_pt_pol0_g+4]);
/* above expression from paper, expression below matches old class but is not correct
P2 = -1.0/_SQRT6_*(
1./10.*delta_g
+2./35.*shear_g
+1./210.*y[pv->index_pt_delta_g+4]
-3./5.*y[pv->index_pt_pol0_g]
+6./35.*y[pv->index_pt_pol2_g]
-1./210.*y[pv->index_pt_pol0_g+4]
);
*/
/* photon density (delta_g = F_0) */
dy[pv->index_pt_delta_g] =
-4./3.*theta_g
-pvecthermo[pth->index_th_dkappa]*(delta_g+_SQRT6_*P2)
//+y[pv->index_pt_gwdot];
+_SQRT6_*y[pv->index_pt_gwdot]; //TBC
/* photon velocity (theta_g = (3k/4)*F_1) */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s_l[2]*shear_g)
-pvecthermo[pth->index_th_dkappa]*theta_g;
/* photon shear (shear_g = F_2/2) */
dy[pv->index_pt_shear_g] =
4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1]
-pvecthermo[pth->index_th_dkappa]*shear_g;
/* photon l=3 */
dy[pv->index_pt_l3_g] =
k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=4; l < pv->l_max_g; l++)
dy[pv->index_pt_delta_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* l=lmax */
l = pv->l_max_g;
dy[pv->index_pt_delta_g+l] =
k*(s_l[l]*y[pv->index_pt_delta_g+l-1]
-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* photon polarization, l=0 (pol0_g = G_0)*/
dy[pv->index_pt_pol0_g] =
-k*y[pv->index_pt_pol0_g+1]
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P2);
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=1; l < pv->l_max_pol_g; l++)
dy[pv->index_pt_pol0_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/* l=lmax */
l = pv->l_max_pol_g;
dy[pv->index_pt_pol0_g+l] =
k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
}
}
if (ppt->evolve_tensor_ur == _TRUE_) {
dy[pv->index_pt_delta_ur] = -4./3.*y[pv->index_pt_theta_ur]+_SQRT6_*y[pv->index_pt_gwdot];
dy[pv->index_pt_theta_ur] = k2*(y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]);
dy[pv->index_pt_shear_ur] = (4./15.*y[pv->index_pt_theta_ur]
-3./10.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1]);
l = 3;
dy[pv->index_pt_l3_ur] = k/(2.*l+1.)*
(l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]);
for (l = 4; l < pv->l_max_ur; l++) {
dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]);
}
l = pv->l_max_ur;
dy[pv->index_pt_delta_ur+l] =
k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]);
}
/** - --> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. */
//TBC: curvature in all ncdm
if (ppt->evolve_tensor_ncdm == _TRUE_) {
idx = pv->index_pt_psi0_ncdm1;
/** - ---> loop over species */
for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {
/** - ----> loop over momentum */
for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) {
/** - ----> define intermediate quantities */
dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
q = pba->q_ncdm[n_ncdm][index_q];
epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
qk_div_epsilon = k*q/epsilon;
/** - ----> ncdm density for given momentum bin */
dy[idx] = -qk_div_epsilon*y[idx+1]-0.25*_SQRT6_*y[pv->index_pt_gwdot]*dlnf0_dlnq;
/** - ----> ncdm l>0 for given momentum bin */
for(l=1; l<pv->l_max_ncdm[n_ncdm]; l++){
dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]);
}
/** - ----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger)
but with curvature taken into account a la arXiv:1305.3261 */
dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l];
/** - ----> jump to next momentum bin or species */
idx += (pv->l_max_ncdm[n_ncdm]+1);
}
}
}
/** - --> tensor metric perturbation h (gravitational waves) */
dy[pv->index_pt_gw] = y[pv->index_pt_gwdot];
/** - --> its time-derivative */
dy[pv->index_pt_gwdot] = pvecmetric[ppw->index_mt_gw_prime_prime];
}
return _SUCCESS_;
}
/**
 * Compute the tight-coupling-approximation (TCA) values of the
 * baryon-photon slip and of the photon shear, at the order requested by
 * ppr->tight_coupling_approximation, and store them in the workspace
 * (ppw->tca_slip, ppw->tca_shear_g).
 *
 * @param y                        Input: vector of perturbations at current time
 * @param parameters_and_workspace Input/Output: fixed parameters plus workspace;
 *                                 on output, tca_slip and tca_shear_g are filled
 * @param error_message            Output: error message
 * @return the error status (_SUCCESS_ unless a class_stop/class_test fires)
 */
int perturb_tca_slip_and_shear(double * y,
                               void * parameters_and_workspace,
                               ErrorMsg error_message
                               ) {

  /** Summary: */

  /** - define local variables */

  /* scale factor and other background quantities */
  double a,a_prime_over_a,a_primeprime_over_a,R;

  /* useful terms for tight-coupling approximation */
  double slip=0.;
  double tau_c=0.,dtau_c=0.;
  double theta_prime,shear_g_prime=0.,theta_prime_prime;
  double g0,g0_prime,g0_prime_prime;
  double F=0.,F_prime=0.,F_prime_prime=0.;

  /* short-cut names for the fields of the input structure */
  struct perturb_parameters_and_workspace * pppaw;
  double k,k2;
  struct precision * ppr;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;
  double * pvecmetric;
  struct perturb_vector * pv;

  /* short-cut notations for the perturbations */
  double delta_g=0.,theta_g=0.,shear_g=0.;
  double delta_b,theta_b;
  double Delta;
  double cb2;
  double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_shear_prime=0.;

  /* perturbed recombination */
  double delta_temp=0.;

  /* for use with curvature */
  double s2_squared;

  /** - rename the fields of the input structure (just to avoid heavy notations) */

  pppaw = parameters_and_workspace;
  k = pppaw->k;
  k2=k*k;
  ppr = pppaw->ppr;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  ppw = pppaw->ppw;
  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;
  pvecmetric = ppw->pvecmetric;
  pv = ppw->pv;

  /** - compute related background quantities */

  a = pvecback[pba->index_bg_a];
  a_prime_over_a = pvecback[pba->index_bg_H] * a;
  a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * a + 2. * a_prime_over_a * a_prime_over_a;
  //z = pba->a_today-1.;
  /* R = (4/3) * rho_gamma / rho_b (photon-to-baryon density ratio factor) */
  R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b];
  s2_squared = 1.-3.*pba->K/k2;

  /** - --> (a) define short-cut notations for the scalar perturbations */
  if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
    delta_g = y[pv->index_pt_delta_g];
    theta_g = y[pv->index_pt_theta_g];
  }
  delta_b = y[pv->index_pt_delta_b];
  theta_b = y[pv->index_pt_theta_b];
  cb2 = pvecthermo[pth->index_th_cb2];

  /* perturbed recombination */
  if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
    delta_temp = y[pv->index_pt_perturbed_recombination_delta_temp];
  }

  /** - --> (b) define short-cut notations used only in tight-coupling approximation */

  tau_c = 1./pvecthermo[pth->index_th_dkappa]; /* inverse of opacity */
  dtau_c = -pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c; /* its first derivative wrt conformal time */
  F = tau_c/(1+R); /* F = tau_c/(1+R) */
  if (ppr->tight_coupling_approximation >= (int)second_order_CLASS) {
    F_prime = dtau_c/(1+R)+tau_c*a_prime_over_a*R/(1+R)/(1+R); /*F' needed by second_order_CLASS and compromise_CLASS */
    if (ppr->tight_coupling_approximation == (int)second_order_CLASS) {
      F_prime_prime =(- pvecthermo[pth->index_th_dddkappa]*tau_c*tau_c /* F'' needed by second_order_CLASS only */
                      + 2.*pvecthermo[pth->index_th_ddkappa]*pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c*tau_c)/(1+R)
        +2.*dtau_c*a_prime_over_a*R/(1+R)/(1+R)
        +tau_c*((a_primeprime_over_a-2.*a_prime_over_a*a_prime_over_a)+2.*a_prime_over_a*a_prime_over_a*R/(1+R))*R/(1+R)/(1+R);
    }
  }

  /** - --> (c) compute metric-related quantities (depending on gauge; additional gauges can be coded below)

      - Each continuity equation contains a term in (theta+metric_continuity) with
      metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge

      - Each Euler equation contains a source term metric_euler with
      metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge

      - Each shear derivative equation contains a source term metric_shear equal to
      metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge

      - metric_shear_prime is the derivative of metric_shear

      - In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous gauge,
      (-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the later by (-6 phi_prime) */

  if (ppt->gauge == synchronous) {
    metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.;
    metric_euler = 0.;
    metric_shear = k2 * pvecmetric[ppw->index_mt_alpha];
    metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime];
  }
  if (ppt->gauge == newtonian) {
    metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime];
    metric_euler = k2*pvecmetric[ppw->index_mt_psi];
    metric_shear = 0.;
    metric_shear_prime = 0.;
  }

  /** - --> (d) if some approximation schemes are turned on, enforce a few y[ ] values computed in perturb_einstein */

  /* free-streaming photon velocity */
  if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)
    theta_g = ppw->rsa_theta_g;

  /* each tight_coupling_approximation branch below overwrites 'slip' with its
     own first-order expression; the second-order schemes then refine it */

  /** - ---> like Ma & Bertschinger */
  if (ppr->tight_coupling_approximation == (int)first_order_MB) {
    slip=2.*R/(1.+R)*a_prime_over_a*(theta_b-theta_g)
      +F*(-a_primeprime_over_a*theta_b
          +k2*(-a_prime_over_a*delta_g/2.
               +cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.)
          -a_prime_over_a*metric_euler);
  }

  /** - ---> relax assumption dkappa~a\f$^{-2}\f$ (like in CAMB) */
  if ((ppr->tight_coupling_approximation == (int)first_order_CAMB) || (ppr->tight_coupling_approximation == (int)compromise_CLASS)) {
    slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g)
      +F*(-a_primeprime_over_a*theta_b
          +k2*(-a_prime_over_a*delta_g/2.
               +cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.)
          -a_prime_over_a*metric_euler);
  }

  /** - ---> also relax assumption cb2~a\f$^{-1}\f$ */
  if ((ppr->tight_coupling_approximation == (int)first_order_CLASS) || (ppr->tight_coupling_approximation == (int)second_order_CLASS)){
    slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g)
      +F*(-a_primeprime_over_a*theta_b
          +k2*(-a_prime_over_a*delta_g/2.
               +pvecthermo[pth->index_th_dcb2]*delta_b
               +cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.)
          -a_prime_over_a*metric_euler);
  }

  /** - ---> intermediate quantities for 2nd order tca: shear_g at first order in tight-coupling */
  shear_g=16./45.*tau_c*(theta_g+metric_shear);
  /* (Ma & Bertschinger give (1/9)*(4/3) instead of (2/15)*(4/3)
     because they didn't include the contribution of G_gamma0
     and G_gamma2, which are of the same order as sigma_g. This
     was already consistently included in CAMB) */

  /** - ---> intermediate quantities for 2nd order tca: zero order for theta_b' = theta_g' */
  /** - ----> perturbed recombination has an impact **/
  theta_prime = (-a_prime_over_a*theta_b+k2*(cb2*(delta_b+delta_temp)+R/4.*delta_g))/(1.+R) + metric_euler;

  /** - ---> intermediate quantities for 2nd order tca: shear_g_prime at first order in tight-coupling */
  shear_g_prime=16./45.*(tau_c*(theta_prime+metric_shear_prime)+dtau_c*(theta_g+metric_shear));

  /** - ---> 2nd order as in CRS*/
  if (ppr->tight_coupling_approximation == (int)second_order_CRS) {

    /* this scheme is only implemented in synchronous gauge and flat space */
    if (ppt->gauge == newtonian) {
      class_stop(error_message,
                 "the second_order_CRS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme");
    }

    if (ppt->gauge == synchronous) {

      class_test(pba->sgnK != 0,
                 ppt->error_message,
                 "the second_order_CRS approach to tight-coupling is coded in the flat case only: for non-flat try another tight-coupling scheme");

      /* infer Delta from h'' using Einstein equation */
      Delta = 2*k2*y[pv->index_pt_eta]
        -2*a_prime_over_a*pvecmetric[ppw->index_mt_h_prime]
        -pvecmetric[ppw->index_mt_h_prime_prime];

      /* monster expression for slip at second-order in tight-coupling */
      slip=(-2./(1.+R)*a_prime_over_a-pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa])*(theta_b-theta_g)
        +(-a_primeprime_over_a*theta_b
          -k2*a_prime_over_a*(delta_g/2.-2.*shear_g)
          +k2*(cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.
               +shear_g_prime)
          )/pvecthermo[pth->index_th_dkappa]/(1.+R)
        -2.*R*(3.*a_prime_over_a*a_prime_over_a*cb2+(1.+R)*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)-3.*a_prime_over_a*a_prime_over_a)
        /(1.+R)/(1.+R)/(1.+R)*(theta_b-theta_g)/pvecthermo[pth->index_th_dkappa]
        +(
          a_primeprime_over_a*a_prime_over_a*((2.-3.*cb2)*R-2.)*theta_b/(1.+R)
          +a_prime_over_a*k2*(1.-3.*cb2)*theta_b/3./(1.+R)
          /* perturbed recombination has an impact (next two lines) */
          +a_primeprime_over_a*k2*cb2*(delta_b+delta_temp)/(1.+R)
          +k2*k2*(3.*cb2-1.)*cb2*(delta_b+delta_temp)/3./(1.+R)
          +k2*k2*R*(3.*cb2-1.)*delta_g/12./(1.+R)
          +a_primeprime_over_a*k2*(2.+3.*R)*delta_g/4./(1.+R)
          +a_prime_over_a*a_prime_over_a*k2*((2.-3.*cb2)*R-1.)*delta_g/2./(1.+R)
          +a_prime_over_a*k2*cb2*(1.+(3.*cb2-2.)*R)*(-theta_b-metric_continuity)/(1.+R)
          +a_prime_over_a*k2*(2.+(5.-3.*cb2)*R)*4./3.*(-theta_g-metric_continuity)/4./(1.+R)
          +a_prime_over_a*(1.-3.*cb2)*k2*2.*metric_shear/3.
          +k2*k2*(3.*cb2-1.)*y[pv->index_pt_eta]/3.
          +2.*a_prime_over_a*k2*(3.*cb2-1.)*pvecmetric[ppw->index_mt_eta_prime]
          +k2*(1.-3.*cb2)*Delta/6.
          )/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/(1.+R)/(1.+R)
        -(4.*a_primeprime_over_a*theta_b-4.*k2*cb2*(-theta_b-metric_continuity)+2.*a_prime_over_a*k2*delta_g+k2*4./3.*(-theta_g-metric_continuity))/2./(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]
        +4.*a_prime_over_a*R/(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g);

      /* second-order correction to shear */
      shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+k2*pvecmetric[ppw->index_mt_alpha_prime]);
    }
  }

  /** - ---> 2nd order like in CLASS paper */
  if (ppr->tight_coupling_approximation == (int)second_order_CLASS) {

    /* this scheme is only implemented in synchronous gauge */
    if (ppt->gauge == newtonian) {
      class_stop(error_message,
                 "the second_order_CLASS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme");
    }

    if (ppt->gauge == synchronous) {

      /* zero order for theta_b'' = theta_g'' */
      theta_prime_prime = ((R-1.)*a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b
                           +k2*(pvecthermo[pth->index_th_dcb2]*delta_b+cb2*(-theta_b-metric_continuity)-a_prime_over_a*R/4.*delta_g+R/4.*4./3.*(-theta_g-metric_continuity)))/(1.+R);

      /* zero-order quantities g0, g0', go'' */
      g0 = -a_prime_over_a*theta_b + k2*(cb2*delta_b-delta_g/4.);
      g0_prime = -a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b+k2*(pvecthermo[pth->index_th_dcb2]*delta_b+(1./3.-cb2)*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime]));
      g0_prime_prime = -a_prime_over_a*theta_prime_prime-2.*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_prime
        -(2.*a_prime_over_a*a_prime_over_a*a_prime_over_a-3.*a_primeprime_over_a*a_prime_over_a)*theta_b
        +k2*(pvecthermo[pth->index_th_ddcb2]*delta_b-2.*pvecthermo[pth->index_th_dcb2]*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime])+(1./3.-cb2)*(theta_prime+0.5*pvecmetric[ppw->index_mt_h_prime_prime]));

      /* slip at second order */
      slip = (1.-2*a_prime_over_a*F)*slip + F*k2*s2_squared*(2.*a_prime_over_a*shear_g+shear_g_prime)
        -F*(F_prime_prime*g0+2.*F_prime*g0_prime+F*g0_prime_prime);

      /* second-order correction to shear */
      shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime);
    }
  }

  /** - ---> add only the most important 2nd order terms */
  if (ppr->tight_coupling_approximation == (int)compromise_CLASS) {

    /* slip at second order (only leading second-order terms) */
    slip = (1.-2.*a_prime_over_a*F)*slip + F*k2*(2.*a_prime_over_a*s2_squared*shear_g+s2_squared*shear_g_prime-(1./3.-cb2)*(F*theta_prime+2.*F_prime*theta_b));

    /* second-order correction to shear */
    shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime);
  }

  /** - ---> store tight-coupling values of photon shear and its derivative */
  ppw->tca_shear_g = shear_g;
  ppw->tca_slip = slip;

  return _SUCCESS_;

}
/**
 * Compute the photon (and, when pba->has_ur is true, the ultra-relativistic
 * species') density and velocity perturbations under the radiation streaming
 * approximation (rsa), and store them in the workspace fields
 * ppw->rsa_delta_g, ppw->rsa_theta_g, ppw->rsa_delta_ur, ppw->rsa_theta_ur.
 *
 * The expressions depend on the gauge and on the chosen
 * ppr->radiation_streaming_approximation scheme (rsa_null sets everything
 * to zero; rsa_MD_with_reio adds reionization corrections proportional to
 * the opacity and its derivative).
 *
 * @param ppr             Input: pointer to precision structure
 * @param pba             Input: pointer to background structure
 * @param pth             Input: pointer to thermodynamics structure
 * @param ppt             Input: pointer to perturbation structure
 * @param k               Input: wavenumber
 * @param y               Input: vector of perturbations
 * @param a_prime_over_a  Input: a'/a (conformal Hubble rate)
 * @param pvecthermo      Input: thermodynamics vector
 *                        (NOTE(review): the body reads ppw->pvecthermo rather
 *                        than this parameter — confirm they always coincide)
 * @param ppw             Input/Output: workspace; rsa_* fields are filled
 * @return the error status
 */
int perturb_rsa_delta_and_theta(
                                struct precision * ppr,
                                struct background * pba,
                                struct thermo * pth,
                                struct perturbs * ppt,
                                double k,
                                double * y,
                                double a_prime_over_a,
                                double * pvecthermo,
                                struct perturb_workspace * ppw
                                ) {

  /* - define local variables */

  double k2;

  k2 = k*k;

  // formulas below to be checked (TBC) in presence of spatial curvature

  /* newtonian gauge */
  if (ppt->gauge == newtonian) {

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {

      if (ppr->radiation_streaming_approximation == rsa_null) {
        ppw->rsa_delta_g = 0.;
        ppw->rsa_theta_g = 0.;
      }
      else {
        ppw->rsa_delta_g = -4.*y[ppw->pv->index_pt_phi];
        ppw->rsa_theta_g = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
      }

      /* reionization correction to the free-streaming solution */
      if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) {

        ppw->rsa_delta_g +=
          -4./k2*ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_b];

        ppw->rsa_theta_g +=
          3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*y[ppw->pv->index_pt_theta_b]
                 +ppw->pvecthermo[pth->index_th_dkappa]*
                 (-a_prime_over_a*y[ppw->pv->index_pt_theta_b]
                  +ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b]
                  +k2*y[ppw->pv->index_pt_phi]));
      }

      if (pba->has_ur == _TRUE_) {

        if (ppr->radiation_streaming_approximation == rsa_null) {
          ppw->rsa_delta_ur = 0.;
          ppw->rsa_theta_ur = 0.;
        }
        else {
          ppw->rsa_delta_ur = -4.*y[ppw->pv->index_pt_phi];
          ppw->rsa_theta_ur = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
        }
      }
    }
  }

  /* synchronous gauge */
  if (ppt->gauge == synchronous) {

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {

      if (ppr->radiation_streaming_approximation == rsa_null) {
        ppw->rsa_delta_g = 0.;
        ppw->rsa_theta_g = 0.;
      }
      else {
        ppw->rsa_delta_g = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                                  -k2*y[ppw->pv->index_pt_eta]);
        ppw->rsa_theta_g = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
      }

      /* reionization correction to the free-streaming solution */
      if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) {

        ppw->rsa_delta_g +=
          -4./k2*ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_b]+0.5*ppw->pvecmetric[ppw->index_mt_h_prime]);

        ppw->rsa_theta_g +=
          3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*
                 (y[ppw->pv->index_pt_theta_b]
                  +0.5*ppw->pvecmetric[ppw->index_mt_h_prime])
                 +ppw->pvecthermo[pth->index_th_dkappa]*
                 (-a_prime_over_a*y[ppw->pv->index_pt_theta_b]
                  + ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b]
                  -a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                  +k2*y[ppw->pv->index_pt_eta]));
      }

      if (pba->has_ur == _TRUE_) {

        if (ppr->radiation_streaming_approximation == rsa_null) {
          ppw->rsa_delta_ur = 0.;
          ppw->rsa_theta_ur = 0.;
        }
        else {
          ppw->rsa_delta_ur = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                                     -k2*y[ppw->pv->index_pt_eta]);
          ppw->rsa_theta_ur = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
        }
      }
    }
  }

  return _SUCCESS_;

}
|
VariableSubSampler.h | /**
* @file
* This file is part of SeisSol.
*
* @author Sebastian Rettenberger (sebastian.rettenberger AT tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger)
*
* @section LICENSE
* Copyright (c) 2015, SeisSol Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
*/
#ifndef VARIABLE_SUBSAMPLER_H_
#define VARIABLE_SUBSAMPLER_H_
#include <cassert>
#include <algorithm>
#include <Eigen/Dense>
#include "Geometry/MeshReader.h"
#include "Numerical_aux/BasisFunction.h"
#include "RefinerUtils.h"
namespace seissol
{
namespace refinement
{
//------------------------------------------------------------------------------
/**
 * Samples a cell-wise variable of the original (coarse) mesh at the center
 * points of the sub-cells produced by a TetrahedronRefiner, by evaluating
 * the basis functions (sampled once per sub-cell position in the unit
 * tetrahedron, see the constructor) with the cell's DOF coefficients.
 *
 * @tparam T floating-point type of the sample coordinates
 */
template<class T>
class VariableSubsampler
{
private:
    /** Basis functions sampled at the center of each sub-cell of the unit tetrahedron */
    std::vector<basisFunction::SampledBasisFunctions<T> > m_BasisFunctions;

    /** The original number of cells (without refinement) */
    const unsigned int m_numCells;

    /** Number of sub-cells each original cell is divided into */
    const unsigned int kSubCellsPerCell;
    /** Number of variables stored per cell in the input data */
    const unsigned int kNumVariables;
    /** Number of (aligned) degrees of freedom per variable */
    const unsigned int kNumAlignedDOF;

    /**
     * Offset of (cell, variable) in the input array; the cell id is first
     * translated through cellMap.
     */
    std::size_t getInVarOffset(unsigned int cell, unsigned int variable,
            const unsigned int* cellMap) const
    {
        return (cellMap[cell]*kNumVariables + variable) * kNumAlignedDOF;
    }

    /** Offset of (cell, subcell) in the output array */
    std::size_t getOutVarOffset(unsigned cell, unsigned int subcell) const
    {
        return kSubCellsPerCell * cell + subcell;
    }

public:
    VariableSubsampler(
            unsigned int numCells,
            const TetrahedronRefiner<T>& tetRefiner,
            unsigned int order,
            unsigned int numVariables,
            unsigned int numAlignedDOF
    );

    /** Evaluate one variable for every sub-cell of every cell (see definition below) */
    void get(const double* inData, const unsigned int* cellMap,
            int variable, double* outData) const;
};
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------

/**
 * Refines the unit tetrahedron with the given refiner and samples the basis
 * functions of the given order at the center of each resulting sub-cell.
 *
 * @param numCells       number of cells of the original (unrefined) mesh
 * @param tetRefiner     refiner that defines the sub-cell subdivision
 * @param order          order of the sampled basis functions
 * @param numVariables   number of variables per cell in the input data
 * @param numAlignedDOF  number of (aligned) degrees of freedom per variable
 */
template<typename T>
VariableSubsampler<T>::VariableSubsampler(
        unsigned int numCells,
        const TetrahedronRefiner<T>& tetRefiner,
        unsigned int order,
        unsigned int numVariables,
        unsigned int numAlignedDOF)
        : m_numCells(numCells),
          kSubCellsPerCell(tetRefiner.getDivisionCount()),
          kNumVariables(numVariables), kNumAlignedDOF(numAlignedDOF)
{
    // Generate cell centerpoints in the reference or unit tetrahedron.
    // std::vector instead of raw new[]/delete[]: the original leaked both
    // arrays if SampledBasisFunctions construction or push_back threw.
    std::vector<Tetrahedron<T> > subCells(kSubCellsPerCell);
    std::vector<Eigen::Matrix<T, 3, 1> > additionalVertices(
            tetRefiner.additionalVerticesPerCell());

    tetRefiner.refine(Tetrahedron<T>::unitTetrahedron(), 0,
            subCells.data(), additionalVertices.data());

    // Generate sampled basis functions, one set per sub-cell center.
    m_BasisFunctions.reserve(kSubCellsPerCell);
    for (unsigned int i = 0; i < kSubCellsPerCell; i++) {
        const Eigen::Matrix<T, 3, 1> pnt = subCells[i].center();
        m_BasisFunctions.push_back(
                basisFunction::SampledBasisFunctions<T>(
                        order, pnt(0), pnt(1), pnt(2)));
    }
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------

/**
 * Evaluates one variable at every sub-cell center of every original cell
 * and writes the sampled values to outData (indexed by cell and sub-cell).
 *
 * @param inData   DOF coefficients of all cells/variables
 * @param cellMap  mapping from cell index to position in inData
 * @param variable index of the variable to sample
 * @param outData  output buffer of size numCells * subCellsPerCell
 */
template<typename T>
void VariableSubsampler<T>::get(const double* inData, const unsigned int* cellMap,
        int variable, double* outData) const
{
    // Iterate over original cells
#ifdef _OPENMP
    #pragma omp parallel for schedule(static)
#endif
    for (unsigned int cell = 0; cell < m_numCells; ++cell) {
        // The coefficient block of this (cell, variable) pair is the same
        // for every sub-cell, so look it up once per cell.
        const double* coeffs = &inData[getInVarOffset(cell, variable, cellMap)];

        for (unsigned int subCell = 0; subCell < kSubCellsPerCell; ++subCell) {
            outData[getOutVarOffset(cell, subCell)] =
                m_BasisFunctions[subCell].evalWithCoeffs(coeffs);
        }
    }
}
//------------------------------------------------------------------------------
} // namespace
}
#endif // VARIABLE_SUBSAMPLER_H_
|
analyze.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% AAA N N AAA L Y Y ZZZZZ EEEEE %
% A A NN N A A L Y Y ZZ E %
% AAAAA N N N AAAAA L Y ZZZ EEE %
% A A N NN A A L Y ZZ E %
% A A N N A A LLLLL Y ZZZZZ EEEEE %
% %
% Analyze An Image %
% %
% Software Design %
% Bill Corbis %
% December 1998 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
*/
/*
Include declarations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include "magick/MagickCore.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% a n a l y z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% analyzeImage() computes the brightness and saturation mean, standard
% deviation, kurtosis and skewness and stores these values as attributes
% of the image.
%
% The format of the analyzeImage method is:
%
% size_t analyzeImage(Image *images,const int argc,
% char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  analyzeImage() accumulates, for every image in the list, the raw moment
  sums (sum x, x^2, x^3, x^4) of the pixel brightness and saturation in HSB
  space (scaled by QuantumRange), derives mean, standard deviation, kurtosis
  and skewness from them, and stores the results as the image properties
  "filter:brightness:*" and "filter:saturation:*".

  Fix vs. the previous revision: the OpenMP row loop updated the shared
  accumulators (brightness_sum_x..., saturation_sum_x..., area) and the
  shared scratch variables hue/saturation/brightness from multiple threads
  without synchronization — a data race yielding wrong statistics.  The
  accumulators are now combined with a reduction clause and the per-pixel
  scratch variables are thread-private (declared inside the loop body).
*/
ModuleExport size_t analyzeImage(Image **images,const int argc,
  const char **argv,ExceptionInfo *exception)
{
  char
    text[MaxTextExtent];

  double
    area,
    brightness_mean,
    brightness_standard_deviation,
    brightness_kurtosis,
    brightness_skewness,
    brightness_sum_x,
    brightness_sum_x2,
    brightness_sum_x3,
    brightness_sum_x4,
    saturation_mean,
    saturation_standard_deviation,
    saturation_kurtosis,
    saturation_skewness,
    saturation_sum_x,
    saturation_sum_x2,
    saturation_sum_x3,
    saturation_sum_x4;

  Image
    *image;

  assert(images != (Image **) NULL);
  assert(*images != (Image *) NULL);
  assert((*images)->signature == MagickSignature);
  (void) argc;
  (void) argv;
  image=(*images);
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
  {
    CacheView
      *image_view;

    MagickBooleanType
      status;

    ssize_t
      y;

    /* reset the raw moment accumulators for this image */
    brightness_sum_x=0.0;
    brightness_sum_x2=0.0;
    brightness_sum_x3=0.0;
    brightness_sum_x4=0.0;
    brightness_mean=0.0;
    brightness_standard_deviation=0.0;
    brightness_kurtosis=0.0;
    brightness_skewness=0.0;
    saturation_sum_x=0.0;
    saturation_sum_x2=0.0;
    saturation_sum_x3=0.0;
    saturation_sum_x4=0.0;
    saturation_mean=0.0;
    saturation_standard_deviation=0.0;
    saturation_kurtosis=0.0;
    saturation_skewness=0.0;
    area=0.0;
    status=MagickTrue;
    image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status) \
    reduction(+:area,brightness_sum_x,brightness_sum_x2,brightness_sum_x3, \
      brightness_sum_x4,saturation_sum_x,saturation_sum_x2,saturation_sum_x3, \
      saturation_sum_x4)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      double
        brightness,  /* per-pixel HSB scratch: thread-private by scope */
        hue,
        saturation;

      register const PixelPacket
        *p;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        ConvertRGBToHSB(GetPixelRed(p),GetPixelGreen(p),
          GetPixelBlue(p),&hue,&saturation,&brightness);
        brightness*=QuantumRange;
        brightness_sum_x+=brightness;
        brightness_sum_x2+=brightness*brightness;
        brightness_sum_x3+=brightness*brightness*brightness;
        brightness_sum_x4+=brightness*brightness*brightness*brightness;
        saturation*=QuantumRange;
        saturation_sum_x+=saturation;
        saturation_sum_x2+=saturation*saturation;
        saturation_sum_x3+=saturation*saturation*saturation;
        saturation_sum_x4+=saturation*saturation*saturation*saturation;
        area++;
        p++;
      }
    }
    image_view=DestroyCacheView(image_view);
    if (area <= 0.0)
      break;  /* no pixels accumulated: stop, as in previous revisions */
    /* brightness statistics: mean, stddev, excess kurtosis, skewness
       (kurtosis/skewness stay 0.0 when the standard deviation vanishes) */
    brightness_mean=brightness_sum_x/area;
    (void) FormatMagickString(text,MaxTextExtent,"%g",brightness_mean);
    (void) SetImageProperty(image,"filter:brightness:mean",text);
    brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/
      area*brightness_sum_x/area));
    (void) FormatMagickString(text,MaxTextExtent,"%g",
      brightness_standard_deviation);
    (void) SetImageProperty(image,"filter:brightness:standard-deviation",text);
    if (brightness_standard_deviation != 0)
      brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean*
        brightness_sum_x3/area+6.0*brightness_mean*brightness_mean*
        brightness_sum_x2/area-3.0*brightness_mean*brightness_mean*
        brightness_mean*brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation*
        brightness_standard_deviation)-3.0;
    (void) FormatMagickString(text,MaxTextExtent,"%g",brightness_kurtosis);
    (void) SetImageProperty(image,"filter:brightness:kurtosis",text);
    if (brightness_standard_deviation != 0)
      brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean*
        brightness_sum_x2/area+2.0*brightness_mean*brightness_mean*
        brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation);
    (void) FormatMagickString(text,MaxTextExtent,"%g",brightness_skewness);
    (void) SetImageProperty(image,"filter:brightness:skewness",text);
    /* saturation statistics, same derivation */
    saturation_mean=saturation_sum_x/area;
    (void) FormatMagickString(text,MaxTextExtent,"%g",saturation_mean);
    (void) SetImageProperty(image,"filter:saturation:mean",text);
    saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/
      area*saturation_sum_x/area));
    (void) FormatMagickString(text,MaxTextExtent,"%g",
      saturation_standard_deviation);
    (void) SetImageProperty(image,"filter:saturation:standard-deviation",text);
    if (saturation_standard_deviation != 0)
      saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean*
        saturation_sum_x3/area+6.0*saturation_mean*saturation_mean*
        saturation_sum_x2/area-3.0*saturation_mean*saturation_mean*
        saturation_mean*saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation*
        saturation_standard_deviation)-3.0;
    (void) FormatMagickString(text,MaxTextExtent,"%g",saturation_kurtosis);
    (void) SetImageProperty(image,"filter:saturation:kurtosis",text);
    if (saturation_standard_deviation != 0)
      saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean*
        saturation_sum_x2/area+2.0*saturation_mean*saturation_mean*
        saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation);
    (void) FormatMagickString(text,MaxTextExtent,"%g",saturation_skewness);
    (void) SetImageProperty(image,"filter:saturation:skewness",text);
  }
  return(MagickImageFilterSignature);
}
|
sse.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32)
#include <windows.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
typedef union {
/* Private 128-bit register view: every member overlays the same 16
* aligned bytes, so any lane interpretation can be read or written.
* Which members exist depends on compiler/ISA feature macros. */
#if defined(SIMDE_VECTOR_SUBSCRIPT)
/* GCC/Clang vector extensions: subscriptable vector types. */
SIMDE_ALIGN(16) int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN(16) simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN(16) simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
/* Portable fallback: plain arrays with the same layout. */
SIMDE_ALIGN(16) int8_t i8[16];
SIMDE_ALIGN(16) int16_t i16[8];
SIMDE_ALIGN(16) int32_t i32[4];
SIMDE_ALIGN(16) int64_t i64[2];
SIMDE_ALIGN(16) uint8_t u8[16];
SIMDE_ALIGN(16) uint16_t u16[8];
SIMDE_ALIGN(16) uint32_t u32[4];
SIMDE_ALIGN(16) uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN(16) simde_int128 i128[1];
SIMDE_ALIGN(16) simde_uint128 u128[1];
#endif
SIMDE_ALIGN(16) simde_float32 f32[4];
/* int_fast32_t/uint_fast32_t are platform-sized; count derived from 16 bytes. */
SIMDE_ALIGN(16) int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN(16) uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
/* A 128-bit register also decomposes into two 64-bit MMX halves. */
SIMDE_ALIGN(16) simde__m64_private m64_private[2];
SIMDE_ALIGN(16) simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
/* Native SSE register. */
SIMDE_ALIGN(16) __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON register views, one per element type. */
SIMDE_ALIGN(16) int8x16_t neon_i8;
SIMDE_ALIGN(16) int16x8_t neon_i16;
SIMDE_ALIGN(16) int32x4_t neon_i32;
SIMDE_ALIGN(16) int64x2_t neon_i64;
SIMDE_ALIGN(16) uint8x16_t neon_u8;
SIMDE_ALIGN(16) uint16x8_t neon_u16;
SIMDE_ALIGN(16) uint32x4_t neon_u32;
SIMDE_ALIGN(16) uint64x2_t neon_u64;
SIMDE_ALIGN(16) float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN(16) float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* WebAssembly SIMD128 register view. */
SIMDE_ALIGN(16) v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* POWER AltiVec/VSX register views. */
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
/* 64-bit element vectors require POWER7+. */
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
/* Select the public simde__m128 representation: prefer the platform's
* native 128-bit type, then compiler vector extensions, and finally
* fall back to the private union itself. */
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN(16) SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
/* On non-x86 targets, optionally expose the __m128 name itself. */
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
/* Both views must be exactly 16 bytes (and, where checkable, 16-byte
* aligned) so the memcpy-based conversions below are lossless. */
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
  /* Convert the private union view into the public simde__m128 type.
   * A byte copy is the strict-aliasing-safe way to type-pun; both
   * types are statically asserted to be 16 bytes. */
  simde__m128 result;
  simde_memcpy(&result, &v, sizeof(v));
  return result;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
  /* Reinterpret a public simde__m128 as the private union so callers
   * can access individual lanes; byte copy avoids aliasing UB. */
  simde__m128_private result;
  simde_memcpy(&result, &v, sizeof(v));
  return result;
}
/* Generate simde__m128_to_neon_* / simde__m128_from_neon_* helpers for
* each NEON element view of the register. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* float64x2_t exists only on AArch64. */
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
/* Generate the equivalent conversion helpers for AltiVec views. */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_BUG_GCC_95782)
/* Hand-written float conversions to work around GCC bug 95782; the
* behavior is identical to the macro-generated versions below. */
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
/* 64-bit element conversions require POWER7+. */
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
/* SSE rounding-mode constants. When native SSE is available, reuse the
* real MXCSR values; otherwise use the documented MXCSR RC-field
* encodings (bits 13-14). */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
/* SSE4.1 _mm_round_* control flags. Reuse the native _MM_FROUND_*
* values when the host headers define them so the constants stay
* interchangeable; otherwise use the standard encodings. */
#if defined(_MM_FROUND_TO_NEAREST_INT)
# define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
# define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
# define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
# define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
# define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
# define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
# define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
# define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
# define SIMDE_MM_FROUND_TO_NEG_INF 0x01
# define SIMDE_MM_FROUND_TO_POS_INF 0x02
# define SIMDE_MM_FROUND_TO_ZERO 0x03
# define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
# define SIMDE_MM_FROUND_RAISE_EXC 0x00
# define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
/* Convenience combinations mirroring Intel's _MM_FROUND_* shorthands. */
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
/* Expose the native names when aliasing is requested and the host
* headers did not already provide them. */
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
# define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
# define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
# define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
# define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
# define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
# define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
# define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
# define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
# define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
# define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
# define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
# define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
/* Return the current FP rounding mode as an SSE SIMDE_MM_ROUND_*
* value. Native SSE reads MXCSR; otherwise fall back to C99
* <fenv.h>; if neither exists, report round-to-nearest.
*
* Fix: the FE_TOWARDZERO and FE_DOWNWARD cases were swapped. x86
* _MM_ROUND_DOWN means round toward -infinity (FE_DOWNWARD) and
* _MM_ROUND_TOWARD_ZERO means truncation (FE_TOWARDZERO); the
* corrected mapping is the exact inverse of
* SIMDE_MM_SET_ROUNDING_MODE below. */
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
vfe_mode = SIMDE_MM_ROUND_DOWN;
break;
#endif
default:
/* Unknown fenv mode: report nearest, the x86 default. */
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
/* Set the FP rounding mode from an SSE SIMDE_MM_ROUND_* value.
* Native SSE writes MXCSR; otherwise translate to <fenv.h> modes.
* Unknown or unsupported values are silently ignored. */
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
/* No matching fenv mode: leave the environment untouched. */
return;
}
fesetround(fe_mode);
#else
/* No way to change the rounding mode on this target. */
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
/* Read the MXCSR control/status register. On non-x86 targets only
* the rounding-mode bits can be emulated, so that is all we report. */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
/* Write the MXCSR control/status register. On non-x86 targets only
* the rounding-mode bits are honored; other flag bits are dropped. */
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
#if !defined(__x86_64__) || !defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_round_ps (simde__m128 a, int rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) {
/* Round each f32 lane of `a` according to the SIMDE_MM_FROUND_*
* flags in `rounding` (SSE4.1 _mm_round_ps semantics). The
* exception-suppression bit (NO_EXC) is masked off before dispatch.
*
* Fix: the unreachable-return fallbacks previously called
* simde_mm_undefined_pd(), which returns a __m128d (and is declared
* in sse2.h, not here) — a type mismatch in a function returning
* simde__m128. `a` is returned instead: it is type-correct, always
* in scope, and the value is irrelevant on these paths. */
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8)
/* NOTE(review): SIMDE_ARM_NEON_A32V8 lacks the _NATIVE suffix used
* everywhere else in this file — confirm the intended macro. */
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 /* deliberately disabled */
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(a);
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 /* deliberately disabled */
r_.neon_f32 = vrndaq_f32(a_.neon_f32);
#elif defined(simde_math_roundf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(a);
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 /* deliberately disabled */
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(a);
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 /* deliberately disabled */
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(a);
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 /* deliberately disabled */
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(a);
#endif
break;
default:
/* SIMDE_REQUIRE_CONSTANT_RANGE rules out other values. */
HEDLEY_UNREACHABLE_RETURN(a);
}
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps(a, rounding)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps(a, rounding)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
/* Build a vector from four floats; e0 is the lowest lane, matching
* the argument order of _mm_set_ps. */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vld1q_f32 loads in memory order, so store lowest lane first. */
SIMDE_ALIGN(16) simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
/* Broadcast `a` into all four f32 lanes (_mm_set_ps1 / _mm_set1_ps).
* Fix: removed a stray `(void) a;` unused-argument suppression that
* immediately preceded a statement using `a` — dead and misleading. */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps1(a) simde_mm_set_ps1(a)
# define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
/* Result = { b[0], a[1], a[2], a[3] } (SSE _mm_move_ss). */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Permute mask: indices 16-19 select b's first 4 bytes, the rest
* select a's remaining 12 bytes. */
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = {
16, 17, 18, 19,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15
};
r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
/* Lane-wise single-precision addition (SSE _mm_add_ps). */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
/* Scalar add: lane 0 = a[0] + b[0]; upper lanes copied from a. */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Add { b[0], 0, 0, 0 } so only lane 0 changes. */
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
// the upper values in the result must be the remnants of <a>.
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
/* Bitwise AND of the raw 128-bit contents (SSE _mm_and_ps). */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Operate on the integer view; bit pattern is what matters. */
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
/* Bitwise (~a) & b (SSE _mm_andnot_ps; note a is the inverted one). */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vbicq computes first & ~second, hence the swapped argument order. */
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* vec_andc computes first & ~second, hence the swapped order. */
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
/* Bitwise XOR of the raw 128-bit contents (SSE _mm_xor_ps). */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
/* Bitwise OR of the raw 128-bit contents (SSE _mm_or_ps). */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
/* SIMDe extension (no SSE equivalent): bitwise NOT of all 128 bits. */
#if defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* NOR with itself is NOT. */
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* Branch-free bitwise select: a ^ ((a ^ b) & mask). */
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
/* Lane-wise rounded average of u16 lanes: (a + b + 1) >> 1
* (SSE _mm_avg_pu16 / PAVGW, operating on MMX registers). */
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
/* Widen to u32 so a + b + 1 cannot overflow before the shift. */
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
# define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
/* Lane-wise rounded average of u8 lanes: (a + b + 1) >> 1
* (SSE _mm_avg_pu8 / PAVGB, operating on MMX registers). */
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
/* Widen to u16 so a + b + 1 cannot overflow before the shift. */
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
# define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
/* SIMDe extension: lane-wise absolute value of the f32 lanes. */
#if defined(SIMDE_X86_AVX512F_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,1,0))
/* _mm512_abs_ps is the only native abs; round-trip through 512 bits. */
return _mm512_castps512_ps128(_mm512_abs_ps(_mm512_castps128_ps512(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
/* Lane-wise equality compare: each lane becomes all-ones when equal,
* all-zeros otherwise (SSE _mm_cmpeq_ps). */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
/* Scalar equality compare: lane 0 is the all-ones/all-zeros result;
* the upper three lanes are copied from a (SSE _mm_cmpeq_ss). */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
/* Lane-wise a >= b compare; all-ones per true lane (SSE _mm_cmpge_ps). */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
/* Scalar a >= b compare in lane 0; upper lanes copied from a.
* (Native path skipped under PGI, which mishandles _mm_cmpge_ss.) */
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
/* Lane-wise a > b compare; all-ones per true lane (SSE _mm_cmpgt_ps). */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
/* Portable _mm_cmpgt_ss: ordered > compare of lane 0 only; lanes 1..3 are
 * copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
    /* NOTE(review): __PGI excluded from the native path — reason not
     * visible here. */
    return _mm_cmpgt_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    /* Full-width compare, then keep only lane 0. */
    return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      /* Bitwise copy of a's upper lanes. */
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
/* Portable _mm_cmple_ps: per-lane ordered <= compare; all-ones mask where
 * a[i] <= b[i], zeros otherwise (NaN compares false). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmple_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Vector-extension compare already produces -1/0 lanes. */
      r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
/* Portable _mm_cmple_ss: ordered <= compare of lane 0 only; lanes 1..3 are
 * copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmple_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    /* Full-width compare, then keep only lane 0. */
    return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      /* Bitwise copy of a's upper lanes. */
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
/* Portable _mm_cmplt_ps: per-lane ordered < compare; all-ones mask where
 * a[i] < b[i], zeros otherwise (NaN compares false). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmplt_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Vector-extension compare already produces -1/0 lanes. */
      r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
/* Portable _mm_cmplt_ss: ordered < compare of lane 0 only; lanes 1..3 are
 * copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmplt_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    /* Full-width compare, then keep only lane 0. */
    return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      /* Bitwise copy of a's upper lanes. */
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
/* Portable _mm_cmpneq_ps: per-lane != compare; all-ones mask where lanes
 * differ. Note x86 CMPNEQPS is an *unordered* compare, so NaN operands
 * yield all-ones — the NEON path (NOT of vceqq) and the scalar `!=`
 * fallback both share that property. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpneq_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* NEON has no "not equal"; invert the equality mask. */
      r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION)
      /* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float))
        is missing from XL C/C++ v16.1.1,
        though the documentation (table 89 on page 432 of the IBM XL C/C++ for
        Linux Compiler Reference, Version 16.1.1) shows that it should be
        present.  Both GCC and clang support it. */
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
/* Portable _mm_cmpneq_ss: != compare of lane 0 only (unordered: NaN gives
 * all-ones); lanes 1..3 are copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpneq_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    /* Full-width compare, then keep only lane 0. */
    return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      /* Bitwise copy of a's upper lanes. */
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
/* Portable _mm_cmpnge_ps, implemented as its ordered complement cmplt.
 * NOTE(review): x86 CMPNGEPS is "not (a >= b)", which is TRUE for
 * unordered (NaN) operands, whereas cmplt is FALSE for NaN — so this
 * delegation differs from the hardware instruction when an input is NaN.
 * Confirm whether that divergence is accepted by the project. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
  return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
/* Portable _mm_cmpnge_ss via cmplt on lane 0.
 * NOTE(review): same NaN caveat as simde_mm_cmpnge_ps — the hardware
 * "not >=" is true for NaN, cmplt is false. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
  return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
/* Portable _mm_cmpngt_ps, implemented as its ordered complement cmple.
 * NOTE(review): hardware "not >" is true for NaN operands; cmple is
 * false for NaN — results differ on NaN inputs. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
  return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
/* Portable _mm_cmpngt_ss via cmple on lane 0 (same NaN caveat as the
 * packed version). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
  return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
/* Portable _mm_cmpnle_ps, implemented as its ordered complement cmpgt.
 * NOTE(review): hardware "not <=" is true for NaN operands; cmpgt is
 * false for NaN — results differ on NaN inputs. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
  return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
/* Portable _mm_cmpnle_ss via cmpgt on lane 0 (same NaN caveat as the
 * packed version). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
  return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
/* Portable _mm_cmpnlt_ps, implemented as its ordered complement cmpge.
 * NOTE(review): hardware "not <" is true for NaN operands; cmpge is
 * false for NaN — results differ on NaN inputs. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
  return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
/* Portable _mm_cmpnlt_ss via cmpge on lane 0 (same NaN caveat as the
 * packed version). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
  return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
/* Portable _mm_cmpord_ps: per-lane "ordered" test — all-ones where
 * neither a[i] nor b[i] is NaN, zeros where either is NaN. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpord_ps(a, b);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    /* x == x is false only for NaN, so (a==a) AND (b==b) is the
     * ordered mask. */
    return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* Note: NEON does not have ordered compare builtin
        Need to compare a eq a and b eq b to check for NaN
        Do AND of results to get final */
      uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
      r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
    #elif defined(simde_math_isnanf)
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
      }
    #else
      /* No isnanf available on this target: there is no correct
       * fallback, so fail loudly. */
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
/* Portable _mm_cmpunord_ps: per-lane "unordered" test — all-ones where
 * either a[i] or b[i] is NaN (the complement of cmpord). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpunord_ps(a, b);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    /* x != x is true only for NaN, so (a!=a) OR (b!=b) is the
     * unordered mask. */
    return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* NOT((a==a) AND (b==b)): self-equality fails only for NaN. */
      uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
      r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
    #elif defined(simde_math_isnanf)
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #else
      /* No isnanf on this target: no correct fallback exists. */
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
/* Portable _mm_cmpunord_ss: unordered (NaN) test on lane 0 only; lanes
 * 1..3 are copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
    /* NOTE(review): __PGI excluded from the native path — reason not
     * visible here. */
    return _mm_cmpunord_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    /* Full-width unordered compare, then keep only lane 0. */
    return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(simde_math_isnanf)
      r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
      SIMDE_VECTORIZE
      for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        /* Bitwise copy of a's upper lanes. */
        r_.u32[i] = a_.u32[i];
      }
    #else
      /* No isnanf on this target: no correct fallback exists. */
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
/* Portable _mm_comieq_ss: scalar compare of lane 0, returning an int
 * (1 if equal, else 0).
 * NOTE(review): the NEON path returns 1 when either operand is NaN
 * (unordered OR equal), matching COMISS's ZF behavior, while the scalar
 * fallback's `==` returns 0 for NaN — the two paths diverge on NaN
 * inputs. Confirm which behavior the project intends. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comieq_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* Build "unordered" from self-equality, then OR with equality. */
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
      uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
    #else
      return a_.f32[0] == b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
/* Portable _mm_comige_ss: scalar >= compare of lane 0, returning an int.
 * NaN operands yield 0 on both the NEON path (ANDed with the not-NaN
 * mask) and the scalar fallback. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comige_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
      uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
    #else
      return a_.f32[0] >= b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
/* Portable _mm_comigt_ss: scalar > compare of lane 0, returning an int.
 * NaN operands yield 0 on both paths. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comigt_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
      uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
    #else
      return a_.f32[0] > b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
/* Portable _mm_comile_ss: scalar <= compare of lane 0, returning an int.
 * NOTE(review): the NEON path ORs in the unordered mask (NaN -> 1,
 * matching COMISS flag semantics) while the scalar fallback's `<=`
 * returns 0 for NaN — the paths diverge on NaN inputs. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comile_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
      uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
    #else
      return a_.f32[0] <= b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
/* Portable _mm_comilt_ss: scalar < compare of lane 0, returning an int.
 * NOTE(review): NEON path treats NaN as 1 (ORed unordered mask); scalar
 * fallback treats NaN as 0 — paths diverge on NaN inputs. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comilt_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
      uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
    #else
      return a_.f32[0] < b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
/* Portable _mm_comineq_ss: scalar != compare of lane 0, returning an int.
 * NOTE(review): NEON path ANDs with the not-NaN mask (NaN -> 0) while
 * the scalar fallback's `!=` returns 1 for NaN — paths diverge on NaN
 * inputs. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comineq_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
      uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
      return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
    #else
      return a_.f32[0] != b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
/* SIMDe-internal helper (x_ prefix = not part of the Intel API):
 * per-lane copysign — each result lane has the magnitude of dest and the
 * sign bit of src. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
  simde__m128_private
    r_,
    dest_ = simde__m128_to_private(dest),
    src_ = simde__m128_to_private(src);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* -0.0f has only the sign bit set; use it to bit-select the sign
     * from src and everything else from dest. */
    const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
    r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
    r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
  #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
    #if !defined(HEDLEY_IBM_VERSION)
      r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
    #else
      /* NOTE(review): IBM XL takes vec_cpsgn operands in the opposite
       * order from GCC/clang — hence the swap. */
      r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
    #endif
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
    r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
  #elif defined(SIMDE_IEEE754_STORAGE)
    (void) src_;
    (void) dest_;
    /* Branchless bit trick: dest XOR ((dest XOR src) AND sign-bit-mask)
     * replaces dest's sign bit with src's. */
    simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
    r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
    }
  #endif

  return simde__m128_from_private(r_);
}
/* SIMDe-internal helper: per-lane "xorsign" — flips the sign of each
 * dest lane wherever the corresponding src lane is negative
 * (dest XOR (sign bit of src)). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
  return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
/* Portable _mm_cvt_pi2ps: convert the two int32 lanes of b to float in
 * result lanes 0-1; lanes 2-3 are passed through from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvt_pi2ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);
    simde__m64_private b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
    #elif defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
      /* Upper 64 bits (lanes 2-3) come straight from a. */
      r_.m64_private[1] = a_.m64_private[1];
    #else
      r_.f32[0] = (simde_float32) b_.i32[0];
      r_.f32[1] = (simde_float32) b_.i32[1];
      r_.i32[2] = a_.i32[2];
      r_.i32[3] = a_.i32[3];
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
/* Portable _mm_cvt_ps2pi: convert lanes 0-1 of a to int32 (rounding per
 * the current rounding mode, via nearbyint/round_ps), returned as __m64. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvt_ps2pi(a);
  #else
    simde__m64_private r_;
    simde__m128_private a_;

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* Round first (current mode), then truncate-convert the low half. */
      a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
      r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
    #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(__clang__) && 0
      /* NOTE(review): deliberately disabled via `&& 0`; this branch also
       * reads a_ before it is assigned, so it must stay disabled unless
       * fixed. */
      SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
    #else
      a_ = simde__m128_to_private(a);

      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
      }
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
/* Portable _mm_cvt_si2ss: convert the int32 b to float in lane 0; lanes
 * 1..3 are passed through from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cvt_si2ss(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
    #else
      r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
      /* Upper lanes copied bit-for-bit from a. */
      r_.i32[1] = a_.i32[1];
      r_.i32[2] = a_.i32[2];
      r_.i32[3] = a_.i32[3];
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
/* Portable _mm_cvt_ss2si: convert lane 0 of a to int32, rounding per the
 * current rounding mode. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cvt_ss2si(a);
  #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
    /* vcvtnq rounds to nearest-even directly; guarded against GCC bug
     * 95399. */
    return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
  #else
    /* Round with the current mode first, then truncate-convert. */
    simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
    return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
/* Portable _mm_cvtpi16_ps: widen the four signed int16 lanes of a to
 * four f32 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtpi16_ps(a);
  #else
    simde__m128_private r_;
    simde__m64_private a_ = simde__m64_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && 0 /* TODO */
      /* NOTE(review): disabled via `&& 0` pending the TODO — vmovl_s16
       * produces int32, not float, so this line is incomplete as
       * written. */
      r_.neon_f32 = vmovl_s16(vget_low_s16(vuzp1q_s16(a_.neon_i16, vmovq_n_s16(0))));
    #elif defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        simde_float32 v = a_.i16[i];
        r_.f32[i] = v;
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
/* Portable _mm_cvtpi32_ps: convert the two int32 lanes of b to float in
 * result lanes 0-1; lanes 2-3 pass through from a. (Same operation as
 * simde_mm_cvt_pi2ps — the intrinsic exists under both names.) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtpi32_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);
    simde__m64_private b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
    #elif defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
      /* Upper 64 bits (lanes 2-3) come straight from a. */
      r_.m64_private[1] = a_.m64_private[1];
    #else
      r_.f32[0] = (simde_float32) b_.i32[0];
      r_.f32[1] = (simde_float32) b_.i32[1];
      r_.i32[2] = a_.i32[2];
      r_.i32[3] = a_.i32[3];
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
/* Portable _mm_cvtpi32x2_ps: convert two __m64 int32 pairs to a full
 * four-lane f32 vector — a supplies lanes 0-1, b supplies lanes 2-3. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtpi32x2_ps(a, b);
  #else
    simde__m128_private r_;
    simde__m64_private
      a_ = simde__m64_to_private(a),
      b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
    #elif defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
      SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
    #else
      r_.f32[0] = (simde_float32) a_.i32[0];
      r_.f32[1] = (simde_float32) a_.i32[1];
      r_.f32[2] = (simde_float32) b_.i32[0];
      r_.f32[3] = (simde_float32) b_.i32[1];
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
/* Portable _mm_cvtpi8_ps: sign-extend the low four int8 lanes of a and
 * convert them to four f32 lanes (bytes 4-7 of a are ignored). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtpi8_ps(a);
  #else
    simde__m128_private r_;
    simde__m64_private a_ = simde__m64_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* s8 -> s16 -> s32 widening, then int-to-float convert. */
      r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
    #else
      r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
      r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
      r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
      r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
/* Portable _mm_cvtps_pi16: round the four f32 lanes of a and narrow to
 * four int16 lanes in a __m64.
 * NOTE(review): the fallback uses roundf (ties away from zero) while
 * vrndiq uses the current rounding mode — results may differ on .5
 * ties; confirm which semantics the project targets. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtps_pi16(a);
  #else
    simde__m64_private r_;
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
      /* Round (current mode), convert to i32, then narrow to i16. */
      r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
      }
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
/* Portable _mm_cvtps_pi32: round lanes 0-1 of a and convert to two int32
 * lanes in a __m64.
 * NOTE(review): fallback rounds with roundf (ties away from zero);
 * the NEON path uses the current rounding mode — may differ on ties. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtps_pi32(a);
  #else
    simde__m64_private r_;
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
      r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_roundf(a_.f32[i]));
      }
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
/* Portable _mm_cvtps_pi8: round and saturate the four f32 lanes of a to
 * four int8 lanes (bytes 0-3 of the result); bytes 4-7 are undefined,
 * matching the hardware contract. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtps_pi8(a);
  #else
    simde__m64_private r_;
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
      /* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
       * i16, combine with an all-zero vector of i16 (which will become the upper
       * half), narrow to i8. */
      float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
      float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
      float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
      r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
        /* Saturate out-of-range values before the narrowing convert. */
        if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
          r_.i8[i] = INT8_MAX;
        else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
          r_.i8[i] = INT8_MIN;
        else
          r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
      }
      /* Note: the upper half is undefined */
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
/* Portable _mm_cvtpu16_ps: widen the four unsigned int16 lanes of a to
 * four f32 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtpu16_ps(a);
  #else
    simde__m128_private r_;
    simde__m64_private a_ = simde__m64_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* u16 -> u32 widening, then unsigned int-to-float convert. */
      r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
    #elif defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.f32[i] = (simde_float32) a_.u16[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
/* Portable _mm_cvtpu8_ps: zero-extend the low four uint8 lanes of a and
 * convert them to four f32 lanes (bytes 4-7 of a are ignored). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtpu8_ps(a);
  #else
    simde__m128_private r_;
    simde__m64_private a_ = simde__m64_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* u8 -> u16 -> u32 widening, then unsigned int-to-float convert. */
      r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
/* Portable _mm_cvtsi32_ss: convert int32 b to float in lane 0; lanes
 * 1..3 are passed through from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cvtsi32_ss(a, b);
  #else
    simde__m128_private r_;
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
    #else
      r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
      SIMDE_VECTORIZE
      for (size_t i = 1 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        /* Bitwise copy of a's upper lanes. */
        r_.i32[i] = a_.i32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
/* Portable _mm_cvtsi64_ss: convert int64 b to float in lane 0; lanes
 * 1..3 are passed through from a. Native path requires AMD64; PGI spells
 * the intrinsic _mm_cvtsi64x_ss. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
    #if !defined(__PGI)
      return _mm_cvtsi64_ss(a, b);
    #else
      return _mm_cvtsi64x_ss(a, b);
    #endif
  #else
    simde__m128_private r_;
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
    #else
      /* Copy a wholesale, then overwrite lane 0. */
      r_ = a_;
      r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
/* Portable _mm_cvtss_f32: extract lane 0 of a as a scalar float. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cvtss_f32(a);
  #else
    simde__m128_private a_ = simde__m128_to_private(a);
    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      return vgetq_lane_f32(a_.neon_f32, 0);
    #else
      return a_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
/* Portable _mm_cvtss_si32: identical operation to _mm_cvt_ss2si
 * (convert lane 0 to int32 with current-mode rounding); delegate. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
  return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
/* Portable _mm_cvtss_si64: convert lane 0 of a to int64.
 * NOTE(review): fallbacks use roundf (ties away from zero), whereas the
 * hardware instruction honors the current rounding mode (default:
 * nearest-even) — results can differ on .5 ties. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
    #if !defined(__PGI)
      return _mm_cvtss_si64(a);
    #else
      /* PGI spells the intrinsic with an 'x'. */
      return _mm_cvtss_si64x(a);
    #endif
  #else
    simde__m128_private a_ = simde__m128_to_private(a);
    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
    #else
      return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
/* Portable _mm_cvtt_ps2pi: convert lanes 0-1 of a to int32 with
 * truncation (round toward zero), returned as __m64. Also exposed as
 * _mm_cvttps_pi32. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtt_ps2pi(a);
  #else
    simde__m64_private r_;
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* vcvt_s32_f32 truncates toward zero — matches cvtt semantics. */
      r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
    #elif defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        /* C float-to-int cast truncates toward zero. */
        r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f32[i]);
      }
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
#  define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
/* Portable _mm_cvtt_ss2si: convert lane 0 of a to int32 with truncation
 * (round toward zero). Also exposed as _mm_cvttss_si32. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cvtt_ss2si(a);
  #else
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
    #else
      /* C float-to-int cast truncates toward zero. */
      return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
    #endif
  #endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
#  define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
/* Portable _mm_cvttss_si64: convert lane 0 of a to int64 with truncation
 * (round toward zero). Native path requires AMD64; MSVC is excluded —
 * NOTE(review): the reason for the _MSC_VER exclusion is not visible in
 * this chunk. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
    #if defined(__PGI)
      /* PGI spells the intrinsic with an 'x'. */
      return _mm_cvttss_si64x(a);
    #else
      return _mm_cvttss_si64(a);
    #endif
  #else
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
    #else
      /* C float-to-int cast truncates toward zero. */
      return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
/* Compare the low elements of a and b for "ordered" (neither is NaN):
 * lane 0 of the result is all-ones if both are non-NaN, all-zeros
 * otherwise; the upper three lanes are copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
/* No isnanf available and no vector fallback: this configuration is
 * not expected to be reachable. */
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
/* Lane-wise single-precision division: r[i] = a[i] / b[i]
 * (portable _mm_div_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* A32 NEON has no divide: approximate 1/b with vrecpeq_f32, refine with
 * one Newton-Raphson step (vrecpsq_f32), then multiply by a.  This is a
 * lower-precision result than true division. */
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
/* Divide the low elements (r[0] = a[0] / b[0]); upper three lanes are
 * copied from a (portable _mm_div_ss). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Reuse the full-vector division, then splice lane 0 back into a. */
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
/* Extract the 16-bit lane selected by imm8 (0-3) from a
 * (portable _mm_extract_pi16 / pextrw).  imm8 must be a compile-time
 * constant in range; the function form below is overridden by macros
 * on native/NEON builds so imm8 stays a literal for the intrinsic. */
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION)
# if HEDLEY_HAS_WARNING("-Wvector-conversion")
/* https://bugs.llvm.org/show_bug.cgi?id=44589 */
# define simde_mm_extract_pi16(a, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
/* Return a copy of a with the 16-bit lane selected by imm8 (0-3)
 * replaced by i (portable _mm_insert_pi16 / pinsrw).  imm8 must be a
 * compile-time constant; on native/NEON builds the function is
 * overridden by macros so the intrinsic receives a literal. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
r_,
a_ = simde__m64_to_private(a);
r_.i64[0] = a_.i64[0];
r_.i16[imm8] = i;
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# if HEDLEY_HAS_WARNING("-Wvector-conversion")
/* https://bugs.llvm.org/show_bug.cgi?id=44589 */
/* Fix: this macro was previously misspelled "ssimde_mm_insert_pi16", so
 * the native override was never actually defined on clang builds with
 * -Wvector-conversion support and the portable fallback was used. */
# define simde_mm_insert_pi16(a, i, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
(_mm_insert_pi16((a), (i), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
/* Load four contiguous floats from 16-byte-aligned memory
 * (portable _mm_load_ps).  Alignment is asserted, not assumed silently. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
r_ = *SIMDE_ALIGN_CAST(simde__m128_private const*, mem_addr);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
/* Load one float and broadcast it to all four lanes
 * (portable _mm_load_ps1 / _mm_load1_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps1 (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load1_ps(mem_addr) simde_mm_load_ps1(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load_ps1(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load_ps1(mem_addr)
#endif
/* Load one float into lane 0 and zero the upper three lanes
 * (portable _mm_load_ss). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
/* Load two floats from mem_addr into the HIGH two lanes of the result;
 * the low two lanes are copied from a (portable _mm_loadh_pi). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
/* NOTE(review): this dereference assumes mem_addr is safely readable as
 * a simde__m64_private; the loadl_pi variant below uses memcpy instead. */
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
/* The SSE documentation says that there are no alignment requirements
for mem_addr. Unfortunately they used the __m64 type for the argument
which is supposed to be 8-byte aligned, so some compilers (like clang
with -Wcast-align) will generate a warning if you try to cast, say,
a simde_float32* to a simde__m64* for this function.
I think the choice of argument type is unfortunate, but I do think we
need to stick to it here. If there is demand I can always add something
like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */
/* Load two floats from mem_addr into the LOW two lanes of the result;
 * the high two lanes are copied from a (portable _mm_loadl_pi).
 * The portable path uses memcpy so unaligned mem_addr is safe. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
/* Load four floats from 16-byte-aligned memory in REVERSED order:
 * r[0] = mem[3], ..., r[3] = mem[0] (portable _mm_loadr_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Reverse within each 64-bit half, then rotate halves: full reversal. */
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && 0
/* TODO: XLC documentation has it, but it doesn't seem to work.
 * More investigation is necessary.
 * Fix: referenced undeclared "a_" here; the loaded vector is "v_". */
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
/* Load four contiguous floats from UNALIGNED memory
 * (portable _mm_loadu_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#else
r_.f32[0] = mem_addr[0];
r_.f32[1] = mem_addr[1];
r_.f32[2] = mem_addr[2];
r_.f32[3] = mem_addr[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
/* Conditionally store each byte of a to mem_addr: byte i is written only
 * if the high bit of mask byte i is set (portable _mm_maskmove_si64 /
 * maskmovq).  Unmasked bytes of the destination are left untouched. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++)
/* mask_.i8[i] < 0 <=> the sign (high) bit of the mask byte is set. */
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
/* Lane-wise maximum of signed 16-bit integers
 * (portable _mm_max_pi16 / pmaxsw). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
/* Lane-wise single-precision maximum (portable _mm_max_ps).  The scalar
 * fallback uses (a > b) ? a : b, so NaN handling follows that comparison
 * rather than any specific hardware's NaN propagation. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
/* Lane-wise maximum of unsigned 8-bit integers
 * (portable _mm_max_pu8 / pmaxub). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
/* Maximum of the low elements (r[0] = max(a[0], b[0])); upper three
 * lanes are copied from a (portable _mm_max_ss). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Fix: the NEON intrinsic is vmaxq_f32; "maxq_f32" does not exist and
 * would fail to compile on A32V7 builds reaching this branch. */
float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
/* Lane-wise minimum of signed 16-bit integers
 * (portable _mm_min_pi16 / pminsw). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
/* Lane-wise single-precision minimum (portable _mm_min_ps).  Branches
 * that match x86 NaN behavior exactly are used by default; cheaper
 * hardware min instructions are only used when SIMDE_FAST_NANS allows
 * relaxed NaN handling. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128);
#else
/* Select a where a < b, else b — mirrors the x86 minps select. */
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128));
#endif
return simde__m128_from_private(r_);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
return simde__m128_from_private(r_);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
simde__m128 mask = simde_mm_cmplt_ps(a, b);
return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
/* Lane-wise minimum of unsigned 8-bit integers
 * (portable _mm_min_pu8 / pminub). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
/* Minimum of the low elements (r[0] = min(a[0], b[0])); upper three
 * lanes are copied from a (portable _mm_min_ss). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
/* Move the high two lanes of b into the low two lanes of the result,
 * keeping the high two lanes of a: r = { b[2], b[3], a[2], a[3] }
 * (portable _mm_movehl_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
/* Move the low two lanes of b into the high two lanes of the result,
 * keeping the low two lanes of a: r = { a[0], a[1], b[0], b[1] }
 * (portable _mm_movelh_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
/* Build an 8-bit mask from the sign bits of the eight bytes of a:
 * bit i of the result is the high bit of byte i
 * (portable _mm_movemask_pi8 / pmovmskb). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Isolate the sign bit of each byte, shift it into position i via a
 * per-lane shift table, then horizontally add into the mask. */
uint8x8_t input = a_.neon_u8;
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
uint8x8_t lo = mask_result;
r = vaddv_u8(lo);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
/* Build a 4-bit mask from the sign bits of the four float lanes of a:
 * bit i of the result is the sign bit of lane i
 * (portable _mm_movemask_ps). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Shift each lane's sign bit down to bit 0, shift lane i left by i,
 * then horizontally add to collect the four bits. */
static const int32_t shift_amount[] = { 0, 1, 2, 3 };
const int32x4_t shift = vld1q_s32(shift_amount);
uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
/* Lane-wise single-precision multiplication: r[i] = a[i] * b[i]
 * (portable _mm_mul_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
/* Multiply the low elements (r[0] = a[0] * b[0]); upper three lanes are
 * copied from a (portable _mm_mul_ss). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
/* Lane-wise unsigned 16-bit multiply returning the HIGH 16 bits of each
 * 32-bit product (portable _mm_mulhi_pu16 / pmulhuw). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Widening multiply to 32 bits, shift the high half down, narrow back. */
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
/* Portable _mm_prefetch: a performance hint only.  On GCC-compatible
 * compilers it forwards to __builtin_prefetch (ignoring the locality
 * hint i); elsewhere it is a no-op, which is always behaviorally safe. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (char const* p, int i) {
#if defined(HEDLEY_GCC_VERSION)
__builtin_prefetch(p);
#else
(void) p;
#endif
(void) i;
}
#if defined(SIMDE_X86_SSE_NATIVE)
# define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
/* SIMDe extension (no SSE equivalent): negate all four float lanes.
 * The native path flips the sign bits by XOR-ing with -0.0f. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)))
;
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
/* Approximate lane-wise reciprocal, 1/a[i] (portable _mm_rcp_ps).
 * Like the hardware instruction, the result is an approximation; the
 * per-branch precision differs (NEON refinement steps are controlled by
 * SIMDE_ACCURACY_PREFERENCE, the IEEE754 branch uses a bit-trick with
 * one Newton-Raphson step, other branches compute an exact divide). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
/* Each vrecpsq_f32 iteration is one Newton-Raphson refinement. */
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
/* Approximate reciprocal of the low element; upper three lanes are
 * copied from a (portable _mm_rcp_ss). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
/* Approximate lane-wise reciprocal square root, 1/sqrt(a[i])
 * (portable _mm_rsqrt_ps).  The IEEE754 branch uses the classic
 * "fast inverse square root" bit-trick with Newton-Raphson refinement
 * steps chosen by SIMDE_ACCURACY_PREFERENCE. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
/* Approximate reciprocal square root of the low element; upper three
 * lanes are copied from a (portable _mm_rsqrt_ss).  Same fast-inverse-
 * square-root scheme as simde_mm_rsqrt_ps, applied to lane 0 only. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde_mm_rsqrt_ps(a).neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
/* Sum of absolute differences of the eight unsigned bytes of a and b;
 * the sum lands in the low 16-bit lane, remaining lanes are zeroed
 * (portable _mm_sad_pu8 / psadbw). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vabd_u8 gives per-byte absolute differences; vpaddl widens and
 * pairwise-adds, then the four partial sums are reduced by hand. */
uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8));
uint16_t r0 = t[0] + t[1] + t[2] + t[3];
r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
#if defined(SIMDE_HAVE_STDLIB_H)
SIMDE_VECTORIZE_REDUCTION(+:sum)
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
/* u8 operands promote to int, so abs() of their difference is safe. */
sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#else
HEDLEY_UNREACHABLE();
#endif
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
/* Build a vector with `a` in the lowest 32-bit lane and the other three
 * lanes zeroed (MOVSS-style load of a scalar). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
/* Like _mm_set_ps but with the arguments in reversed (memory) order;
 * implemented by simply swapping the arguments to simde_mm_set_ps. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
/* All-zeros vector. The portable fallback memsets the whole register image,
 * which zeroes every union member regardless of representation. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
/* _mm_undefined_ps: the contents are unspecified by contract. When the
 * compiler cannot be told to tolerate an uninitialized read, we fall back to
 * returning zeros instead; the diagnostic push/pop silences the
 * "maybe-uninitialized" warning on the other path. */
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
/* Internal helper: all-ones vector, obtained as x == x on a zero vector
 * (a comparison that is true in every lane sets every bit). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
simde__m128 t = simde_mm_setzero_ps();
return simde_mm_cmpeq_ps(t, t);
}
/* Store fence. Without the native intrinsic we approximate with the
 * strongest portable barrier available (a full seq-cst fence); the OpenMP
 * critical section at the bottom is a last-resort ordering point. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
/* TODO: Use Hedley. */
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST)
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{ }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
/* Pack four 2-bit lane selectors (z,y,x,w) into the 8-bit immediate used by
 * the shuffle intrinsics. */
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
/* PSHUFW: permute the four 16-bit lanes of `a` according to imm8
 * (2 bits per destination lane). Three implementations: native intrinsic,
 * compiler vector-shuffle builtin, or a plain scalar loop. */
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 8, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
# pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
/* SHUFPS: low two result lanes come from `a`, high two from `b`, each
 * selected by 2 bits of imm8 (hence the +4 offsets in the builtin form,
 * which indexes into the 8-lane {a,b} concatenation). */
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
simde__m128_from_private((simde__m128_private) { .f32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
/* Lane-wise single-precision square root.
 *
 * Fix: the scalar fallback called simde_math_sqrtf() but was guarded by
 * defined(simde_math_sqrt) (the double-precision macro). If only the float
 * variant were available the fallback would be skipped, and if only the
 * double variant were available the call would not compile. The guard now
 * matches the function actually used, consistent with simde_mm_sqrt_ss. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 NEON has no sqrt instruction: start from a reciprocal-sqrt
 * estimate and run Newton-Raphson steps (more steps for a higher
 * SIMDE_ACCURACY_PREFERENCE), then multiply by a to get sqrt(a). */
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
/* Square root of the lowest lane only; upper three lanes are copied from
 * `a` unchanged (SQRTSS semantics). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Compute the full-vector sqrt and merge only lane 0 back into a. */
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
/* Aligned store of all four lanes to mem_addr (must be 16-byte aligned). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P5_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* Broadcast-store: write lane 0 of `a` to all four aligned slots. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps1 (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Lane 0 is deliberately stored four times. */
mem_addr[0] = vgetq_lane_f32(a_.neon_f32, 0);
mem_addr[1] = vgetq_lane_f32(a_.neon_f32, 0);
mem_addr[2] = vgetq_lane_f32(a_.neon_f32, 0);
mem_addr[3] = vgetq_lane_f32(a_.neon_f32, 0);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[0];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store_ps1(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* Store only lane 0 (MOVSS to memory); no alignment requirement. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* _mm_store1_ps is an alias for _mm_store_ps1 in the Intel API. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store1_ps(mem_addr, a);
#else
simde_mm_store_ps1(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* Store the upper two lanes (f32[2], f32[3]) of `a` into a 64-bit slot. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->f32[0] = vgetq_lane_f32(a_.neon_f32, 2);
dest_->f32[1] = vgetq_lane_f32(a_.neon_f32, 3);
#else
dest_->f32[0] = a_.f32[2];
dest_->f32[1] = a_.f32[3];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
/* Store the lower two lanes (f32[0], f32[1]) of `a` into a 64-bit slot. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
/* Aligned store of the four lanes in reversed order. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_SHUFFLE_VECTOR_)
/* Reverse in-register, then reuse the normal aligned store. */
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
mem_addr[0] = vgetq_lane_f32(a_.neon_f32, 3);
mem_addr[1] = vgetq_lane_f32(a_.neon_f32, 2);
mem_addr[2] = vgetq_lane_f32(a_.neon_f32, 1);
mem_addr[3] = vgetq_lane_f32(a_.neon_f32, 0);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* Unaligned store of all four lanes; memcpy is the portable unaligned write. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* Lane-wise single-precision subtraction (a - b). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
/* Subtract only the lowest lanes; upper three lanes are copied from `a`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
/* UCOMI*_SS family: unordered (quiet) scalar comparisons of lane 0.
 * Per the x86 semantics, eq/le/lt also report true when either operand is
 * NaN (the "unordered" result), while ge/gt/neq require both operands to be
 * non-NaN -- hence the OR-with-NaN-mask vs AND-with-not-NaN-mask split in
 * the NEON branches. The fenv branch saves the FP environment with
 * feholdexcept (a non-stop mode) so the comparison cannot raise a trap,
 * then restores it. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* x == x is false only for NaN, so these masks detect NaN lanes. */
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
/* a >= b, false when unordered. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
/* a > b, false when unordered. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
/* a <= b, true when unordered. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
/* a < b, true when unordered. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
/* a != b, false when unordered. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
/* Detect availability of the compiler builtin behind _mm_undefined_ps;
 * the resulting SIMDE_HAVE_UNDEFINED128 macro gates its use elsewhere. */
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
# if __has_builtin(__builtin_ia32_undef128)
# define SIMDE_HAVE_UNDEFINED128
# endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
# define SIMDE_HAVE_UNDEFINED128
# endif
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
/* Interleave the upper halves of a and b: result = {a2, b2, a3, b3}. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
/* Interleave the lower halves of a and b: result = {a0, b0, a1, b1}. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpacklo_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_low_f32(a_.neon_f32);
float32x2_t b1 = vget_low_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = b_.f32[0];
r_.f32[2] = a_.f32[1];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
/* Non-temporal 64-bit store. The portable fallbacks are plain stores --
 * the cache-bypass hint is not reproducible without the native intrinsic. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private*
dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
/* Non-temporal 128-bit store; mem_addr must be 16-byte aligned. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(SIMDE_ASSUME_ALIGNED(16, mem_addr), a_.neon_f32);
#else
simde_memcpy(SIMDE_ASSUME_ALIGNED(16, mem_addr), &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* In-place 4x4 transpose of four row vectors. The NEON version uses
 * transpose/combine pairs; the generic version is the classic
 * unpack + movelh/movehl sequence from Intel's reference implementation. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
vget_low_f32(ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
vget_low_f32(ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
vget_high_f32(ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
vget_high_f32(ROW23.val[1])); \
} while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 tmp3, tmp2, tmp1, tmp0; \
tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(tmp0, tmp2); \
row1 = simde_mm_movehl_ps(tmp2, tmp0); \
row2 = simde_mm_movelh_ps(tmp1, tmp3); \
row3 = simde_mm_movehl_ps(tmp3, tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
/* MXCSR bit constants (exception flags, exception masks, flush-to-zero).
 * Each one defers to the native <xmmintrin.h> definition when present and
 * otherwise hard-codes the architectural bit value. */
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
binary_move_generator.h | /*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_BINARY_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_BINARY_MOVE_GENERATOR_H__
#include "abstract_move_generator.h"
namespace printemps {
namespace neighborhood {
/*****************************************************************************/
/**
 * Generates single-variable "flip" moves for binary decision variables:
 * one move per mutable variable, setting it to 1 - current value.
 */
template <class T_Variable, class T_Expression>
class BinaryMoveGenerator
: public AbstractMoveGenerator<T_Variable, T_Expression> {
private:
public:
/*************************************************************************/
BinaryMoveGenerator(void) {
/// nothing to do
}
/*************************************************************************/
virtual ~BinaryMoveGenerator(void) {
/// nothing to do
}
/*************************************************************************/
/// Builds one flip move per mutable variable in a_RAW_VARIABLE_PTRS and
/// installs the updater lambda that refreshes target values and flags
/// before each evaluation round.
void setup(
const std::vector<model_component::Variable<T_Variable, T_Expression> *>
&a_RAW_VARIABLE_PTRS) {
/**
* "Flip" move for binary variables:
* e.g) binary variable x \in {0, 1}
* move: {(x = 1)} (if x = 0)
* {(x = 0)} (if x = 1)
*/
/**
* Extract mutable variables.
*/
auto mutable_variable_ptrs =
extract_mutable_variable_ptrs(a_RAW_VARIABLE_PTRS);
/**
* Setup move objects.
*/
const int VARIABLES_SIZE = mutable_variable_ptrs.size();
this->m_moves.resize(VARIABLES_SIZE);
this->m_flags.resize(VARIABLES_SIZE);
for (auto i = 0; i < VARIABLES_SIZE; i++) {
auto &move = this->m_moves[i];
move.sense = MoveSense::Binary;
move.related_constraint_ptrs =
mutable_variable_ptrs[i]->related_constraint_ptrs();
/// Target value 0 here is a placeholder; the updater lambda below
/// rewrites it to 1 - current value on every update.
move.alterations.emplace_back(mutable_variable_ptrs[i], 0);
move.is_univariable_move = true;
move.is_selection_move = false;
move.is_special_neighborhood_move = false;
move.is_available = true;
move.overlap_rate = 0.0;
}
/**
* Setup move updater.
*/
auto move_updater = //
[this, mutable_variable_ptrs, VARIABLES_SIZE](
auto * a_moves_ptr, //
auto * a_flags, //
const bool a_ACCEPT_ALL, //
const bool a_ACCEPT_OBJECTIVE_IMPROVABLE, //
const bool a_ACCEPT_FEASIBILITY_IMPROVABLE, //
[[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
for (auto i = 0; i < VARIABLES_SIZE; i++) {
/// A move is flagged active (1) only if it passes the acceptance
/// filters; each iteration touches a distinct index, so the
/// OpenMP loop is race-free.
if (a_ACCEPT_ALL ||
(a_ACCEPT_OBJECTIVE_IMPROVABLE &&
mutable_variable_ptrs[i]->is_objective_improvable()) ||
(a_ACCEPT_FEASIBILITY_IMPROVABLE &&
mutable_variable_ptrs[i]
->is_feasibility_improvable())) {
(*a_moves_ptr)[i].alterations.front().second =
1 - mutable_variable_ptrs[i]->value();
(*a_flags)[i] = 1;
} else {
(*a_flags)[i] = 0;
}
}
};
this->m_move_updater = move_updater;
}
};
} // namespace neighborhood
} // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/ |
sunmd5_fmt_plug.c | /*
* First cut, which was oSSL only, and done in 2 source files, by
* Bartavelle (please change to proper cite).
* Corrections, and re-write into SSE2, JimF.
*
* This software was written by Bartavelle <cite> and JimF
* jfoug AT cox dot net, in 2012 for CMIYC-12. No copyright is claimed,
* and the software is hereby placed in the public domain. In case this
* attempt to disclaim copyright and place the software in the public
* domain is deemed null and void, then the software is:
* Copyright (c) 2012 Bartavelle and JimF and it is hereby released to
* the general public under the following terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sunmd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sunmd5);
#else
#include <string.h>
#include "os.h"
#if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER
#include <unistd.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "options.h"
#include "misc.h"
#include "params.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "loader.h"
#include "memory.h"
#include "md5.h"
#include "simd-intrinsics.h"
#include "memdbg.h"
/*
* these 2 are for testing non-MMX mode. if we
* undefine these 2, then we force build oSSL model.
*/
//#undef SIMD_PARA_MD5
//#undef SIMD_COEF_32
/* Format parameters for the SunMD5 crypt scheme. MD5 constants are only
 * defined here if the included md5.h did not already provide them. */
#ifndef MD5_CBLOCK
#define MD5_CBLOCK 64
#endif
#ifndef MD5_DIGEST_LENGTH
#define MD5_DIGEST_LENGTH 16
#endif
#define STRINGIZE2(s) #s
#define STRINGIZE(s) STRINGIZE2(s)
#define PLAINTEXT_LENGTH 120
/* JtR actually only 'uses' 4 byte binaries from this format, but for cmp_exact we need full binary */
#define FULL_BINARY_SIZE 16
#define BINARY_SIZE 4
#define BINARY_ALIGN 4
/* salt==48 allows $md5$ (5) rounds=999999$ (14) salt (16) null(1) (40 allows for 19 byte salt) */
#define SALT_SIZE 40
#define SALT_ALIGN 1
/* With SIMD, keys are processed in batches sized by the MD5 lane count. */
#if SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SIMD_COEF_32
#define MAX_KEYS_PER_CRYPT (16 * SIMD_COEF_32 * SIMD_PARA_MD5)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define FORMAT_LABEL "SunMD5"
#define FORMAT_NAME ""
#define FORMAT_TAG "$md5$"
#define FORMAT_TAG2 "$md5,"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
// it is salted, but very slow, AND there is no difference between 1 and multi salts, so simply turn off salt benchmarks
#define BENCHMARK_LENGTH -1
/* THIS one IS a depricated sun string, but for real: $md5$3UqYqndY$$6P.aaWOoucxxq.l00SS9k0: Sun MD5 "password" */
/* $md5,rounds=5000$GUBv0xjJ$$mSwgIswdjlTY0YxV7HBVm0 passwd This one was the python code from http://packages.python.org/passlib/lib/passlib.hash.sun_md5_crypt.html, but the rounds are busted. */
static struct fmt_tests tests[] = {
{"$md5$rounds=904$Vc3VgyFx44iS8.Yu$Scf90iLWN6O6mT9TA06NK/", "test"},
/* from CMIYC-12 */
{"$md5$rounds=904$ZZZig8GS.S0pRNhc$dw5NMYJoxLlnFq4E.phLy.", "Don41dL33"},
{"$md5$rounds=904$zSuVTn567UJLv14u$q2n2ZBFwKg2tElFBIzUq/0", "J4ck!3Wood"},
{"$md5$rounds=904$zuZVga3IOSfOshxU$gkUlHjR6apc6cr.7Bu5tt/", "K!m!M4rt!n"},
{"$md5$rounds=904$/KP7bVaKYTOcplkx$i74NBQdysLaDTUSEu5FtQ.", "people"},
{"$md5$rounds=904$/p4qqfWbTQcUqjNc$leW.8/vzyDpFQxSZrV0x.0", "me"},
{"$md5$rounds=904$wOyGLc0NMRiXJTvI$v69lVSnLif78hZbZWhuEG1", "private"},
// from pass_gen.pl 120 bytes long.
{"$md5$rounds=904$Vc3VgyFx44iS8.Yu$mEyEet31IlEkO4HTeobmq0", "012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"},
{NULL}
};
#ifdef SIMD_PARA_MD5
#define PARA SIMD_PARA_MD5
#else
#define PARA 1
#endif
#ifdef SIMD_COEF_32
#define COEF SIMD_COEF_32
#define BLK_CNT (PARA*COEF)
#if PARA > 1
/*
* for para-3 32 bit at MAX_KEYS=1k, 0==281 1==292 2==284 3==284 4==283 5==282
* for para-3 32 bit at MAX_KEYS=512, 0==286 1==287 2==279 3==279 4==278 5==278
* for para-3 32 bit at MAX_KEYS=256, 0==278 1==282 2==276 3==274 4==274 5==274 Above these, the same speed
* for para-3 32 bit at MAX_KEYS=128, 0==272 1==277 2==271 3==270 4==271 5==270
* for para-3 32 bit at MAX_KEYS=64, 0==259 1==264 2==264 3==263 4==259 5==270
*/
#define MIN_DROP_BACK 1
#else
#define MIN_DROP_BACK 1
#endif
//#define GETPOS(i, index) ( ((index)&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) )
//#define PARAGETPOS(i, index) ( ((index)&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + ((index)/SIMD_COEF_32)*SIMD_COEF_32*64 )
// these next 2 defines are same as above, but faster (on my gcc). Speed went fro 282 to 292, about 3.5% improvement. Shifts vs mults.
#define GETPOS(i, index) ( (((index)&(SIMD_COEF_32-1))<<2) + (((i)&(0xffffffff-3))*SIMD_COEF_32) + ((i)&3) )
#define PARAGETPOS(i, index) ( (((index)&(SIMD_COEF_32-1))<<2) + (((i)&(0xffffffff-3))*SIMD_COEF_32) + ((i)&3) + (((unsigned int)index/SIMD_COEF_32*SIMD_COEF_32)<<6) )
/* GETPOS0 can be 'faster' if we already have a pointer to the first DWORD in this block. Thus we can do a GETPOS(0,idx), and then multiple GETPOS0(x) and sometimes be faster */
#define GETPOS0(i) ( (((i)&(0xffffffff-3))*SIMD_COEF_32) + ((i)&3) )
/* output buffer for para is only 16 bytes per COEF, vs 64, so it's fewer bytes to jumbo to the next PARA start */
#define PARAGETOUTPOS(i, index) ( (((index)&(SIMD_COEF_32-1))<<2) + (((i)&(0xffffffff-3))*SIMD_COEF_32) + ((i)&3) + (((unsigned int)index/SIMD_COEF_32*SIMD_COEF_32)<<4) )
static unsigned char (*input_buf)[BLK_CNT*MD5_CBLOCK];
static unsigned char (*out_buf)[BLK_CNT*MD5_DIGEST_LENGTH];
static unsigned char (*input_buf_big)[25][BLK_CNT*MD5_CBLOCK];
#else
#define COEF 1
#endif
/* allocated in init() */
static char (*crypt_out)[FULL_BINARY_SIZE];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static char *saved_salt;
/* minimum number of rounds we do, not including the per-user ones */
#define BASIC_ROUND_COUNT 4096 /* enough to make things interesting */
#define DIGEST_LEN 16
#define ROUND_BUFFER_LEN 64
/* ------------------------------------------------------------------ */
typedef struct {
MD5_CTX context; /* working buffer for MD5 algorithm */
unsigned char digest[DIGEST_LEN]; /* where the MD5 digest is stored */
} JTR_ALIGN(MEM_ALIGN_CACHE) Contx, *pConx;
static Contx *data;
/*
* Public domain quotation courtesy of Project Gutenberg.
* ftp://metalab.unc.edu/pub/docs/books/gutenberg/etext98/2ws2610.txt
* Hamlet III.ii - 1517 bytes, including trailing NUL
* ANSI-C string constant concatenation is a requirement here.
*/
#define constant_phrase_size 1517
static const char constant_phrase[] =
"To be, or not to be,--that is the question:--\n"
"Whether 'tis nobler in the mind to suffer\n"
"The slings and arrows of outrageous fortune\n"
"Or to take arms against a sea of troubles,\n"
"And by opposing end them?--To die,--to sleep,--\n"
"No more; and by a sleep to say we end\n"
"The heartache, and the thousand natural shocks\n"
"That flesh is heir to,--'tis a consummation\n"
"Devoutly to be wish'd. To die,--to sleep;--\n"
"To sleep! perchance to dream:--ay, there's the rub;\n"
"For in that sleep of death what dreams may come,\n"
"When we have shuffled off this mortal coil,\n"
"Must give us pause: there's the respect\n"
"That makes calamity of so long life;\n"
"For who would bear the whips and scorns of time,\n"
"The oppressor's wrong, the proud man's contumely,\n"
"The pangs of despis'd love, the law's delay,\n"
"The insolence of office, and the spurns\n"
"That patient merit of the unworthy takes,\n"
"When he himself might his quietus make\n"
"With a bare bodkin? who would these fardels bear,\n"
"To grunt and sweat under a weary life,\n"
"But that the dread of something after death,--\n"
"The undiscover'd country, from whose bourn\n"
"No traveller returns,--puzzles the will,\n"
"And makes us rather bear those ills we have\n"
"Than fly to others that we know not of?\n"
"Thus conscience does make cowards of us all;\n"
"And thus the native hue of resolution\n"
"Is sicklied o'er with the pale cast of thought;\n"
"And enterprises of great pith and moment,\n"
"With this regard, their currents turn awry,\n"
"And lose the name of action.--Soft you now!\n"
"The fair Ophelia!--Nymph, in thy orisons\n"
"Be all my sins remember'd.\n";
static unsigned char mod5[0x100];
/*
 * init(): one-time format setup.  Scales the key counts for OpenMP,
 * allocates the flat key/salt/digest buffers and, in SIMD builds, the
 * cache-aligned SSE input/output work buffers (one set per thread group).
 */
static void init(struct fmt_main *self)
{
	int i;
#ifdef SIMD_COEF_32
	int j, k, ngroups = 1;
#endif
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#ifdef SIMD_COEF_32
	/* one group of SIMD work buffers per OMP task */
	ngroups = omp_t;
#endif
#endif
#ifdef SIMD_COEF_32
	/*
	 * allocate SSE2 input and output buffer space. For input's we have
	 * 2 buffers. One does the 'short' 1 block crypts. The other does the
	 * long 25 block crypts. All MUST be aligned to SIMD.
	 */
	input_buf = mem_calloc_align(ngroups, sizeof(*input_buf), MEM_ALIGN_CACHE);
	input_buf_big = mem_calloc_align(ngroups, sizeof(*input_buf_big), MEM_ALIGN_CACHE);
	out_buf = mem_calloc_align(ngroups, sizeof(*out_buf), MEM_ALIGN_CACHE);

	/* not super optimal, but only done one time, at program startup, so speed is not important */
	/* pre-load the 1517-byte constant phrase into every SIMD lane of the
	   big buffers once; it sits at offsets 16..1532 and never changes */
	for (k = 0; k < ngroups; ++k) {
		for (i = 0; i < constant_phrase_size; ++i) {
			for (j = 0; j < BLK_CNT; ++j)
				input_buf_big[k][(i+16)/64][PARAGETPOS((16+i)%64,j)] = constant_phrase[i];
		}
	}
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	saved_salt = mem_calloc(1, SALT_SIZE + 1);
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
	data = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*data), MEM_ALIGN_CACHE);

	/* lookup table: mod5[x] == x % 5, used by coin_step() */
	for (i = 0; i < 0x100; i++)
		mod5[i] = i % 5;
}
/* Release every buffer allocated by init(). */
static void done(void)
{
	MEM_FREE(data);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_salt);
	MEM_FREE(saved_key);
#ifdef SIMD_COEF_32
	MEM_FREE(input_buf);
	MEM_FREE(input_buf_big);
	MEM_FREE(out_buf);
#endif
}
/*
 * Syntax check of one ciphertext line.  Accepts
 *   $md5$[rounds=N$]salt$[$]hash   and   $md5,rounds=N$salt$[$]hash
 * where salt is up to 16 crypt64 chars and hash is exactly 22 crypt64
 * chars.  Returns 1 if the string looks like a valid SunMD5 hash, else 0.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *pos;

	/* Common prefix. */
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) &&
	    strncmp(ciphertext, FORMAT_TAG2, FORMAT_TAG_LEN))
		return 0;
	/* advance past "$md5", leaving the ',' or '$' separator in place */
	ciphertext += FORMAT_TAG_LEN-1;

	/* Optional rounds. */
	if (!strncmp(ciphertext, ",rounds=", 8) ||
	    !strncmp(ciphertext, "$rounds=", 8)) {
		pos = ciphertext += 8;
		while (*ciphertext >= '0' && *ciphertext <= '9')
			ciphertext++;
		/* Accept only numbers from 0 to 999999? */
		/* Zero-padding is ok */
		if (ciphertext - pos < 1 || ciphertext - pos > 6)
			return 0;
	}
	if (*ciphertext++ != '$')
		return 0;
	/* Salt per se. */
	pos = ciphertext;
	while (atoi64[ARCH_INDEX(*ciphertext)] != 0x7F)
		ciphertext++;
	/* Upto 16 salt chars? */
	if (ciphertext - pos > 16)
		return 0;
	/* One or two $ */
	if (*ciphertext++ != '$')
		return 0;
	if (*ciphertext == '$')
		ciphertext++;
	/* Hash per se. */
	pos = ciphertext;
	while (atoi64[ARCH_INDEX(*ciphertext)] != 0x7F)
		ciphertext++;
	/* Samples from CMIYC-12 have garbage in padding bits.
	   Hence the check is disabled for now. */
	if (ciphertext - pos != 22
	    /* || atoi64[ARCH_INDEX(*(ciphertext - 1))] & 0x0F */)
		return 0;
	/* No garbage at the end */
	if (*ciphertext)
		return 0;

	return 1;
}
/*
 * Decode n crypt64 characters into an integer.  Digit order is
 * little-endian: s[0] holds the least significant 6-bit group.
 */
static long from64 (unsigned char *s, int n) {
	long value = 0;
	int pos;

	for (pos = n - 1; pos >= 0; pos--)
		value = (value << 6) + atoi64[s[pos]];
	return value;
}
/*
 * Convert the 22 crypt64 characters after the last '$' into the 16 raw
 * digest bytes.  Each 4-char group decodes (via from64) to 3 bytes that
 * are scattered to interleaved offsets -- (0,6,12), (1,7,13), ... --
 * matching sunmd5's output byte permutation.  Returns a static buffer.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		char c[FULL_BINARY_SIZE];
		uint32_t w[FULL_BINARY_SIZE / sizeof(uint32_t)];
	} out;
	unsigned l;
	unsigned char *cp;

	cp = (unsigned char*)strrchr(ciphertext, '$');
	++cp;
	l = from64(cp, 4);
	out.c[0] = l>>16; out.c[6] = (l>>8)&0xFF; out.c[12] = l&0xFF;
	l = from64(&cp[4], 4);
	out.c[1] = l>>16; out.c[7] = (l>>8)&0xFF; out.c[13] = l&0xFF;
	l = from64(&cp[8], 4);
	out.c[2] = l>>16; out.c[8] = (l>>8)&0xFF; out.c[14] = l&0xFF;
	l = from64(&cp[12], 4);
	out.c[3] = l>>16; out.c[9] = (l>>8)&0xFF; out.c[15] = l&0xFF;
	l = from64(&cp[16], 4);
	out.c[4] = l>>16; out.c[10] = (l>>8)&0xFF; out.c[5] = l&0xFF;
	l = from64(&cp[20], 2);
	out.c[11] = l;
	return out.c;
}
/*
 * Extract the salt portion: everything up to (but excluding) the final
 * '$' that introduces the hash.  Returns a static, NUL-padded buffer.
 */
static void *get_salt(char *ciphertext)
{
	static char out[SALT_SIZE];
	char *hash_start = strrchr(ciphertext, '$');
	size_t salt_len = hash_start - ciphertext;

	memset(out, 0, sizeof(out));
	memcpy(out, ciphertext, salt_len);
	return out;
}
/* Return the low PH_MASK_n bits of the first 32 bits of a computed hash
   (used by the cracker's hash-table bucketing at increasing precisions). */
static int get_hash_0(int index) { return *((uint32_t*)(crypt_out[index])) & PH_MASK_0; }
static int get_hash_1(int index) { return *((uint32_t*)(crypt_out[index])) & PH_MASK_1; }
static int get_hash_2(int index) { return *((uint32_t*)(crypt_out[index])) & PH_MASK_2; }
static int get_hash_3(int index) { return *((uint32_t*)(crypt_out[index])) & PH_MASK_3; }
static int get_hash_4(int index) { return *((uint32_t*)(crypt_out[index])) & PH_MASK_4; }
static int get_hash_5(int index) { return *((uint32_t*)(crypt_out[index])) & PH_MASK_5; }
static int get_hash_6(int index) { return *((uint32_t*)(crypt_out[index])) & PH_MASK_6; }
/*
 * Hash the salt string into SALT_HASH_SIZE buckets.  Mixes the last few
 * characters before the final '$' (or the end of the string when there
 * is no '$'), since those vary the most between salts.
 */
static int salt_hash(void *salt)
{
	int h;
	char *sp = (char *)salt;
	char *cp = strrchr(sp, '$');
	if (cp) --cp;
	else cp = &sp[strlen(sp)-1];

	h = atoi64[ARCH_INDEX(*cp--)];
	h ^= (unsigned char)*cp--;
	h <<= 5;
	h ^= atoi64[ARCH_INDEX(*cp--)];
	h ^= (unsigned char)*cp++;  /* NOTE(review): post-increment result is unused */
	return h & (SALT_HASH_SIZE - 1);
}
/* Install the current salt (fixed SALT_SIZE bytes; get_salt() NUL-pads it). */
static void set_salt(void *salt)
{
	memcpy(saved_salt, salt, SALT_SIZE);
}
/* Store candidate plaintext 'key' in slot 'index' (strnzcpy bounds it to
   PLAINTEXT_LENGTH; presumably NUL-terminating -- project helper). */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
/* Return the stored plaintext for slot 'index'. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Return nonzero if any of the 'count' computed digests matches the
 * first 32 bits of the target binary.
 */
static int cmp_all(void *binary, int count)
{
	uint32_t target = *((uint32_t*)binary);
	int i;

	for (i = 0; i < count; i++) {
		if (target == *((uint32_t*)crypt_out[i]))
			return 1;
	}
	return 0;
}
/* Compare the first 32 bits of the target binary against hash 'index'. */
static int cmp_one(void *binary, int index)
{
	return *((uint32_t*)binary) == *((uint32_t*)crypt_out[index]);
}
/* Full 16-byte digest comparison; re-decodes the ciphertext via get_binary(). */
static int cmp_exact(char *source, int index)
{
	return !memcmp(get_binary(source), crypt_out[index], FULL_BINARY_SIZE);
}
// inline function, as a macro.
#define md5bit_1(d,b) ((d[((b)>>3)&0xF]&(1<<((b)&7))) ? 1 : 0)
// md5bit with no conditional logic.
#define md5bit_2(d,b) (((d[((b)>>3)&0xF]>>((b)&7)))&1)
/*
 * Extract bit 'bit_num' of a 16-byte MD5 digest.  The byte index is
 * masked with 0xF, so bit numbers are effectively taken modulo 128.
 * Branch-free form of the original Sun reference implementation (which
 * used explicit byte_off/bit_off arithmetic and a conditional).
 */
inline static int
md5bit(unsigned char *digest, int bit_num)
{
	int byte_idx = (bit_num >> 3) & 0xF;
	int bit_idx = bit_num & 7;

	return (digest[byte_idx] >> bit_idx) & 1;
}
/*
 * One step of sunmd5's pseudo-random "coin flip": selects a digest byte
 * through two levels of indirection into the digest itself, extracts a
 * single bit with md5bit(), and shifts that bit into position 'shift'.
 */
inline static int
coin_step(unsigned char *digest, int i, int j, int shift)
{
	return md5bit(digest, digest[(digest[i] >> mod5[digest[j]]) & 0x0F] >> ((digest[j] >> (digest[i] & 0x07)) & 0x01)) << shift;
}
#define ROUNDS "rounds="
#define ROUNDSLEN 7

/*
 * Parse the integer value following "rounds=" wherever it occurs in the
 * salt string.  The number must be terminated by '\0', ',' or '$' (the
 * crypt(3c) separators); anything else is an error.  Returns 0 when no
 * valid rounds specification is present (including s == NULL).
 *
 * Fixes vs original: the strncmp() re-check after strstr() was redundant
 * (strstr already guarantees the match) and strtol() overflow was not
 * detected (errno/ERANGE is now checked, so e.g. a huge value no longer
 * silently truncates through the cast to unsigned).
 */
static unsigned int
getrounds(const char *s)
{
	char *r, *p, *e;
	long val;

	if (s == NULL)
		return (0);
	if ((r = strstr(s, ROUNDS)) == NULL)
		return (0);
	p = r + ROUNDSLEN;
	errno = 0;
	val = strtol(p, &e, 10);
	/*
	 * An error occurred, the value overflowed, or there is non-numeric
	 * stuff at the end which isn't one of the crypt(3c) special
	 * chars ',' or '$'
	 */
	if (errno == ERANGE || val < 0 ||
	    !(*e == '\0' || *e == ',' || *e == '$')) {
		fprintf(stderr, "crypt_sunmd5: invalid rounds specification \"%s\"", s);
		return (0);
	}
	return ((unsigned int)val);
}
/*
 * crypt_all(): compute the SunMD5 hash for all 'count' queued keys.
 * Seeds each key with MD5(key . salt), then runs 'maxrounds' iterations;
 * each round hashes the previous digest, optionally the 1517-byte
 * constant phrase (chosen by the digest-derived "coin flip"), and the
 * round number in ASCII.  Keys are split into ngroups chunks, each
 * processed by one OpenMP task; SIMD builds batch same-sized crypts.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int idx, group_idx;
#ifdef _OPENMP
	int ngroups = OMP_SCALE * omp_get_max_threads();
#else
	int ngroups = 1;
#endif
	/* ceiling division: contiguous chunk of keys per group */
	int group_sz = (count + ngroups - 1) / ngroups;

	for (idx = 0; idx < count; ++idx) {
		/* initialise the context */
		MD5_Init(&data[idx].context);
		/* update with the (hopefully entropic) plaintext */
		MD5_Update(&data[idx].context, (unsigned char *)saved_key[idx], strlen(saved_key[idx]));
		/* update with the (publicly known) salt */
		MD5_Update(&data[idx].context, (unsigned char *)saved_salt, strlen(saved_salt));
		/* compute the digest */
		MD5_Final(data[idx].digest, &data[idx].context);
	}

#ifdef _OPENMP
#ifdef __INTEL_COMPILER
#ifdef SIMD_COEF_32
#pragma omp parallel for default(none) private(idx) shared(ngroups, group_sz, saved_salt, data, input_buf, input_buf_big, out_buf, constant_phrase)
#else
#pragma omp parallel for default(none) private(idx) shared(ngroups, group_sz, saved_salt, data, constant_phrase)
#endif // SIMD_COEF_32
#else
#ifdef SIMD_COEF_32
#pragma omp parallel for default(none) private(idx) shared(ngroups, group_sz, saved_salt, data, input_buf, input_buf_big, out_buf)
#else
#pragma omp parallel for default(none) private(idx) shared(ngroups, group_sz, saved_salt, data)
#endif // SIMD_COEF_32
#endif // __INTEL_COMPILER
#endif // _OPENMP
	for (group_idx = 0; group_idx < ngroups; ++group_idx) {
		int roundasciilen;
		int round, maxrounds = BASIC_ROUND_COUNT + getrounds(saved_salt);
		char roundascii[8];
		/* half-open [idx_begin, idx_end) slice of keys this group owns */
		int idx_begin = group_idx * group_sz;
		int idx_end = idx_begin + group_sz > count ?
			count : idx_begin + group_sz;
#ifdef SIMD_COEF_32
		int i, j, zs, zb, zs0, zb0;
		int bigs[MAX_KEYS_PER_CRYPT], smalls[MAX_KEYS_PER_CRYPT];
		int nbig, nsmall;
		// int zb2;  // used in debugging
		memset(input_buf[group_idx], 0, BLK_CNT*MD5_CBLOCK);
#endif
		/*
		 * now to delay high-speed md5 implementations that have stuff
		 * like code inlining, loops unrolled and table lookup
		 */

		/* this is the 'first' sprintf(roundascii,"%d",round); The rest are at the bottom of the loop */
		/* some compilers dont allow strcpy inside OMP block with default(none) used */
		//strcpy(roundascii, "0");
		roundascii[0] = '0';
		roundascii[1] = 0;
		roundasciilen=1;

		for (round = 0; round < maxrounds; round++) {
#ifdef SIMD_COEF_32
			nbig = nsmall = 0;
#endif
			/*
			 * now this is computed at bottom of loop (we start properly set at "0", len==1)
			 * ** code replaced**
			 * roundasciilen = sprintf(roundascii, "%d", round);
			 */

			for (idx = idx_begin; idx < idx_end; ++idx) {
				pConx px = &data[idx];

				/* derive two 7-bit indirection indexes from the current
				   digest, then xor the two digest bits they select */
				int indirect_a =
					md5bit(px->digest, round) ?
					coin_step(px->digest, 1, 4, 0) |
					coin_step(px->digest, 2, 5, 1) |
					coin_step(px->digest, 3, 6, 2) |
					coin_step(px->digest, 4, 7, 3) |
					coin_step(px->digest, 5, 8, 4) |
					coin_step(px->digest, 6, 9, 5) |
					coin_step(px->digest, 7, 10, 6)
					:
					coin_step(px->digest, 0, 3, 0) |
					coin_step(px->digest, 1, 4, 1) |
					coin_step(px->digest, 2, 5, 2) |
					coin_step(px->digest, 3, 6, 3) |
					coin_step(px->digest, 4, 7, 4) |
					coin_step(px->digest, 5, 8, 5) |
					coin_step(px->digest, 6, 9, 6);

				int indirect_b =
					md5bit(px->digest, round + 64) ?
					coin_step(px->digest, 9, 12, 0) |
					coin_step(px->digest, 10, 13, 1) |
					coin_step(px->digest, 11, 14, 2) |
					coin_step(px->digest, 12, 15, 3) |
					coin_step(px->digest, 13, 0, 4) |
					coin_step(px->digest, 14, 1, 5) |
					coin_step(px->digest, 15, 2, 6)
					:
					coin_step(px->digest, 8, 11, 0) |
					coin_step(px->digest, 9, 12, 1) |
					coin_step(px->digest, 10, 13, 2) |
					coin_step(px->digest, 11, 14, 3) |
					coin_step(px->digest, 12, 15, 4) |
					coin_step(px->digest, 13, 0, 5) |
					coin_step(px->digest, 14, 1, 6);

				int bit = md5bit(px->digest, indirect_a) ^ md5bit(px->digest, indirect_b);

				/* xor a coin-toss; if true, mix-in the constant phrase */
#ifndef SIMD_COEF_32
				/*
				 * This is the real 'crypt'. Pretty trival, but there are 2 possible sizes
				 * there is a 1 block crypte, and a 25 block crypt. They are chosen based
				 * upon the 'long' coin flip algorithm above.
				 */

				/* re-initialise the context */
				MD5_Init(&px->context);
				/* update with the previous digest */
				MD5_Update(&px->context, px->digest, sizeof (px->digest));
				/* optional, add a constant string. This is what makes the 'long' crypt loops */
				if (bit)
					MD5_Update(&px->context, (unsigned char *) constant_phrase, constant_phrase_size);
				/* Add a decimal current roundcount */
				MD5_Update(&px->context, (unsigned char *) roundascii, roundasciilen);
				MD5_Final(px->digest, &px->context);
#else
				/*
				 * we do not actually perform the work here. We run through all of the
				 * keys we are working on, and figure out which ones need 'small' buffers
				 * and which ones need large buffers. Then we can group them SIMD_COEF_32*SIMD_PARA_MD5
				 * at a time, later in the process.
				 */
				if (bit)
					bigs[nbig++] = idx;
				else
					smalls[nsmall++] = idx;
#endif
			}

#ifdef SIMD_COEF_32
			/*
			 * ok, at this time we know what group each element is in. Either a large
			 * crypt, or small one. Now group our crypts up based upon the crypt size
			 * doing COEF*PARA at a time, until we have 2 'partial' buffers left. We
			 * 'waste' some CPU in them, but that is what happens. If there is only 1 or
			 * or 2, we may even drop back and use oSSL, it may be faster than an entire
			 * SSE crypt. We will have to time test, and find where the cut over point is
			 * but likely it will NOT be 0. The cuttover appears to be 1, meaning that 0,
			 * only a 1 limb PARA buffer will not be done (and will fall back to oSSL). This
			 * was for PARA==3 on 32 bit. A much BIGGER difference was in the MAX_KEYS_PER_CRYPT
			 * increasing this does make for more speed, HOWEVER, it also makes for more lost time
			 * if the run is stopped, since ALL of the words in the keys buffer would have to be
			 * redone again (hopefully only redone over the candidates left to test in the input file).
			 * The choice to use 512 MAX_KEYS seems about right.
			 */

			/********************************************/
			/* get the little ones out of the way first */
			/********************************************/

			/* first, put the length text, 0x80, and buffer length into the buffer 1 time, not in the loop */
			for (j = 0; j < BLK_CNT; ++j) {
				unsigned char *cpo = &input_buf[group_idx][PARAGETPOS(0, j)];
				int k;
				for (k = 0; k < roundasciilen; ++k) {
					cpo[GETPOS0(k+16)] = roundascii[k];
				}
				cpo[GETPOS0(k+16)] = 0x80;
				/* bit length of the 1-limb message: 16 digest bytes + round text */
				((uint32_t*)cpo)[14 * SIMD_COEF_32]=((16+roundasciilen)<<3);
			}
			/* now do the 'loop' for the small 1-limb blocks. */
			zs = zs0 = zb = zb0 = 0;
			// zb2 = 0; /* for debugging */
			for (i = 0; i < nsmall-MIN_DROP_BACK; i += BLK_CNT) {
				for (j = 0; j < BLK_CNT && zs < nsmall; ++j) {
					pConx px = &data[smalls[zs++]];
					uint32_t *pi = (uint32_t*)px->digest;
					uint32_t *po = (uint32_t*)&input_buf[group_idx][PARAGETPOS(0, j)];
					/*
					 * digest is flat, input buf is SSE_COEF.
					 * input_buf is po (output) here, we are writing to it.
					 */
					po[0] = pi[0];
					po[COEF] = pi[1];
					po[COEF+COEF] = pi[2];
					po[COEF+COEF+COEF] = pi[3];
				}
				SIMDmd5body(input_buf[group_idx], (unsigned int *)out_buf[group_idx], NULL, SSEi_MIXED_IN);
				/*
				 * we convert from COEF back to flat. since this data will later be used
				 * in non linear order, there is no gain trying to keep it in COEF order
				 */
				for (j = 0; j < BLK_CNT && zs0 < nsmall; ++j) {
					uint32_t *pi, *po;
					pConx px = &data[smalls[zs0++]];
					pi = (uint32_t*)&out_buf[group_idx][PARAGETOUTPOS(0, j)];
					po = (uint32_t*)px->digest;
					po[0] = pi[0];
					po[1] = pi[COEF];
					po[2] = pi[COEF+COEF];
					po[3] = pi[COEF+COEF+COEF];
				}
			}
			/* this catches any left over small's, and simply uses oSSL */
			while (zs < nsmall) {
				pConx px = &data[smalls[zs++]];
				MD5_Init(&px->context);
				MD5_Update(&px->context, px->digest, sizeof (px->digest));
				MD5_Update(&px->context, (unsigned char *) roundascii, roundasciilen);
				MD5_Final(px->digest, &px->context);
			}
			/*****************************************************************************
			 * Now do the big ones.  These are more complex that the little ones
			 * (much more complex actually).  Here, we have to insert the prior crypt
			 * into the first 16 bytes (just like in the little ones, but then we have
			 * our buffer 'pre-loaded' with a 1517 byte string.  we append the text number
			 * after the null byte of that 1517 byte string, then put on the 0x80, and
			 * then put the bit length.  NOTE, that this actually is an array of 25
			 * SSE_PARA buffer blocks, so there is quite a bit more manipluation of where
			 * in the buffer to write this.  This is most noted in the text number, where
			 * it spills over from buffer 24 to 25.
			 *****************************************************************************/

			/* first, put the length text, 0x80, and buffer length into the buffer 1 time, not in the loop */
			for (j = 0; j < BLK_CNT; ++j) {
				unsigned char *cpo23 = &(input_buf_big[group_idx][23][PARAGETPOS(0, j)]);
				unsigned char *cpo24 = &(input_buf_big[group_idx][24][PARAGETPOS(0, j)]);
				*((uint32_t*)cpo24) = 0; /* key clean */
				cpo23[GETPOS0(61)] = roundascii[0];
				/* the round text starts at message byte 1533 (16+1517), i.e. byte
				   61 of limb 23; where the 0x80 pad lands depends on its length */
				switch(roundasciilen) {
				case 1:
					cpo23[GETPOS0(62)] = 0x80;
					cpo23[GETPOS0(63)] = 0; /* key clean. */
					break;
				case 2:
					cpo23[GETPOS0(62)] = roundascii[1];
					cpo23[GETPOS0(63)] = 0x80;
					break;
				case 3:
					cpo23[GETPOS0(62)] = roundascii[1];
					cpo23[GETPOS0(63)] = roundascii[2];
					cpo24[0] = 0x80;
					break;
				case 4:
					cpo23[GETPOS0(62)] = roundascii[1];
					cpo23[GETPOS0(63)] = roundascii[2];
					cpo24[0] = roundascii[3];
					cpo24[1] = 0x80;
					break;
				case 5:
					cpo23[GETPOS0(62)] = roundascii[1];
					cpo23[GETPOS0(63)] = roundascii[2];
					cpo24[0] = roundascii[3];
					cpo24[1] = roundascii[4];
					cpo24[2] = 0x80;
					break;
				case 6:
					cpo23[GETPOS0(62)] = roundascii[1];
					cpo23[GETPOS0(63)] = roundascii[2];
					cpo24[0] = roundascii[3];
					cpo24[1] = roundascii[4];
					cpo24[2] = roundascii[5];
					cpo24[3] = 0x80;
					break;
				}
				((uint32_t*)cpo24)[14*SIMD_COEF_32]=((16+constant_phrase_size+roundasciilen)<<3);
			}
			for (i = 0; i < nbig-MIN_DROP_BACK; i += BLK_CNT) {
				for (j = 0; j < BLK_CNT && zb < nbig; ++j) {
					pConx px = &data[bigs[zb++]];
					uint32_t *pi = (uint32_t *)px->digest;
					uint32_t *po = (uint32_t*)&input_buf_big[group_idx][0][PARAGETPOS(0, j)];
					/*
					 * digest is flat, input buf is SSE_COEF.
					 * input_buf is po (output) here, we are writing to it.
					 */
					po[0] = pi[0];
					po[COEF] = pi[1];
					po[COEF+COEF] = pi[2];
					po[COEF+COEF+COEF] = pi[3];
				}
				/* run all 25 limbs; limbs 1..24 reload the prior state */
				SIMDmd5body(input_buf_big[group_idx][0], (unsigned int *)out_buf[group_idx], NULL, SSEi_MIXED_IN);
				for (j = 1; j < 25; ++j)
					SIMDmd5body(input_buf_big[group_idx][j], (unsigned int *)out_buf[group_idx], (unsigned int *)out_buf[group_idx], SSEi_RELOAD|SSEi_MIXED_IN);
				for (j = 0; j < BLK_CNT && zb0 < nbig; ++j) {
					uint32_t *pi, *po;
					pConx px = &data[bigs[zb0++]];
					pi = (uint32_t*)&out_buf[group_idx][PARAGETOUTPOS(0, j)];
					po = (uint32_t*)px->digest;
					po[0] = pi[0];
					po[1] = pi[COEF];
					po[2] = pi[COEF+COEF];
					po[3] = pi[COEF+COEF+COEF];
				}
			}
			/* this catches any left overs, and simply uses oSSL */
			while (zb < nbig) {
				pConx px = &data[bigs[zb++]];
				MD5_Init(&px->context);
				MD5_Update(&px->context, px->digest, sizeof (px->digest));
				MD5_Update(&px->context, (unsigned char *) constant_phrase, constant_phrase_size);
				MD5_Update(&px->context, (unsigned char *) roundascii, roundasciilen);
				MD5_Final(px->digest, &px->context);
			}
#endif
			/*
			 * this is the equivalent of the original code:
			 *    roundasciilen = sprintf(roundascii, "%d", round);
			 * that was at the top of this rounds loop.  We have moved
			 * it to the bottom. It does compute one 'extra' value that
			 * is never used (5001), but it is faster, and that one
			 * extra value causes no harm.
			 * we do call the sprintf a few times (at 10, 100, 1000, etc)
			 * but we only call it there.
			 */
			if (++roundascii[roundasciilen-1] == '9'+1) {
				int j = roundasciilen-1;
				if (j > 0) {
					do {
						roundascii[j] = '0';
						++roundascii[--j];
					} while (j > 0 && roundascii[j] == '9'+1);
				}
				if (!j && roundascii[0] == '9'+1) {
					/* some compilers dont allow sprintf inside OMP block */
					//roundasciilen = sprintf(roundascii, "%d", round+1);
					roundascii[0] = '1';
					roundascii[roundasciilen++] = '0';
					roundascii[roundasciilen] = 0;
				}
			}
		}
	}
	/* publish final digests for the cmp_* / get_hash_* callbacks */
	for (idx = 0; idx < count; ++idx) {
		pConx px = &data[idx];
		memcpy(crypt_out[idx], px->digest, FULL_BINARY_SIZE);
	}
	return count;
}
/*
* the number of iterations is the sum of a "basic round count" (4096) and
* a configurable "per-user round count"; we report the sum as cost
*/
unsigned int sunmd5_cost(void *salt)
{
	/* salt is the ASCII salt string; getrounds() extracts "rounds=N" if present */
	return (unsigned int) (BASIC_ROUND_COUNT + getrounds(salt));
}
/* Format registration record: parameters block, then the method table. */
struct fmt_main fmt_sunmd5 = {
	{
		/* format parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			/*
			 * sum of a "basic round count" (4096) and
			 * a configurable "per-user round count"
			 */
			"iteration count",
		},
		{ FORMAT_TAG, FORMAT_TAG2 },
		tests
	}, {
		/* format methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			sunmd5_cost,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT.  Note: Y is modified in place while normalizing the carry.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that 0 <= x->tv_usec - y->tv_usec <= 1000000. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec is certainly positive at this point. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative difference iff x's seconds fall short of (normalized) y's. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-1 3D 7-point stencil benchmark: allocates two
 * ping-pong time planes of an Nz x Ny x Nx grid, runs Nt-1 sweeps TESTS
 * times, and reports the minimum wall-clock time.
 *
 * Fixes vs original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when too few command-line
 *    arguments were supplied (undefined behavior); defaults added.
 *  - the initialization loops started at 1 and only filled A[0], so the
 *    stencil read uninitialized boundary values; both planes are now
 *    initialized over the full grid.
 *  - `min(...)` was undefined -- the macro defined above is MIN.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Grid extent per dimension (+2 halo points) and time steps; defaults
       are used when the command line does not supply them. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;

    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Two time planes (ping-pong buffers) of an Nz x Ny x Nx grid. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for (i = 0; i < Nz; i++) {
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for (j = 0; j < Ny; j++) {
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 4;
    tile_size[3] = 64;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;

    /* Initialize BOTH planes over the FULL grid, including the boundary
       plane at index 0 that the stencil reads but never writes. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: the macro defined in this file is MIN, not min */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    (void) ts_return; /* timing status kept for parity with reference code */

    PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (Causing performance degradation
    /* for(i=0; i<Nz; i++){
           for(j=0;j<Ny;j++){
               free(A[0][i][j]);
               free(A[1][i][j]);
           }
           free(A[0][i]);
           free(A[1][i]);
       }
       free(A[0]);
       free(A[1]);
    */
    return 0;
}
|
dgeswp.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgeswp.c, normal z -> d, Fri Sep 28 17:38:06 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
/******************************************************************************/
/**
 * Applies the pivot interchanges recorded in ipiv (with stride incx) to
 * the m-by-n double-precision matrix pA, rowwise or columnwise per the
 * colrow flag.  pA is in LAPACK column-major layout with leading
 * dimension lda; it is translated to tile layout, swapped by the
 * asynchronous tile routine, and translated back.
 * Returns PlasmaSuccess, a negative argument index on a bad argument,
 * or an error status from descriptor creation / the async region.
 */
int plasma_dgeswp(plasma_enum_t colrow,
                  int m, int n,
                  double *pA, int lda, int *ipiv, int incx)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((colrow != PlasmaColumnwise) &&
        (colrow != PlasmaRowwise)) {
        plasma_error("illegal value of colrow");
        return -1;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }

    // quick return
    if (imin(n, m) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geswp(plasma, PlasmaRealDouble, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_general_desc_create() failed");
        return retval;
    }

    // Initialize sequence.
    // NOTE(review): retval from sequence/request init is assigned but never
    // checked -- confirm these calls cannot fail in practice.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
#pragma omp parallel
#pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);

        // Call tile async function.
        plasma_omp_dgeswp(colrow, A, ipiv, incx, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}
/******************************************************************************/
void plasma_omp_dgeswp(plasma_enum_t colrow,
                       plasma_desc_t A, int *ipiv, int incx,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Asynchronous tile variant of plasma_dgeswp.  Errors are reported by
    // failing the sequence/request pair rather than via a return value.

    // Validate sequence and request FIRST: plasma_request_fail() dereferences
    // both, so the original order (which passed a NULL sequence/request into
    // plasma_request_fail) would dereference the very pointer found NULL.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((colrow != PlasmaColumnwise) &&
        (colrow != PlasmaRowwise)) {
        plasma_error("illegal value of colrow");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0)
        return;

    // Call the parallel function.
    plasma_pdgeswp(colrow, A, ipiv, incx, sequence, request);
}
|
GB_binop__gt_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__gt_int16
// A.*B function (eWiseMult): GB_AemultB__gt_int16
// A*D function (colscale): GB_AxD__gt_int16
// D*A function (rowscale): GB_DxB__gt_int16
// C+=B function (dense accum): GB_Cdense_accumB__gt_int16
// C+=b function (dense accum): GB_Cdense_accumb__gt_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__gt_int16
// C=scalar+B GB_bind1st__gt_int16
// C=scalar+B' GB_bind1st_tran__gt_int16
// C=A+scalar GB_bind2nd__gt_int16
// C=A'+scalar GB_bind2nd_tran__gt_int16
// C type: bool
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x > y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_INT16 || GxB_NO_GT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__gt_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B with the GT_INT16 operator, all three matrices dense.
    #if GB_DISABLE
    // kernel compiled out via GB_control.h; caller falls back to the
    // generic (typecasting) method
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__gt_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    // C += B: accumulate a sparse matrix into a dense matrix.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the code generator disabled this kernel for GT (#if 0); the function
    // exists only to satisfy the generated interface and reports success
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__gt_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b: accumulate a scalar into a dense matrix.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the code generator disabled this kernel for GT (#if 0); the function
    // is a no-op that reports success
    #if 0
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__gt_int16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    // C = A*D: column scale with diagonal matrix D.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C values are of type bool (GB_CTYPE for the GT comparator)
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__gt_int16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    // C = D*B: row scale with diagonal matrix D.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C values are of type bool (GB_CTYPE for the GT comparator)
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__gt_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd: C = A+B or C<M> = A+B.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // ek-slice workspaces, allocated inside the template if needed and
    // released by GB_FREE_ALL (defined just above this function)
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__gt_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult: C = A.*B or C<M> = A.*B.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // ek-slice workspaces, allocated inside the template if needed and
    // released by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__gt_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (x > Bx [p]) for each entry present in B.
    bool *Cx = (bool *) Cx_output ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int16_t x = (*((int16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // process only entries reported present by GBB
        if (GBB (Bb, p))
        {
            int16_t bij = Bx [p] ;
            Cx [p] = (x > bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__gt_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (Ax [p] > y) for each entry present in A.
    bool *Cx = (bool *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // process only entries reported present by GBB
        if (GBB (Ab, p))
        {
            int16_t aij = Ax [p] ;
            Cx [p] = (aij > y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB_bind1st_tran__gt_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (x, A'): transpose A, applying cij = (x > aij) via GB_CAST_OP.
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB_bind2nd_tran__gt_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (A', y): transpose A, applying cij = (aij > y) via GB_CAST_OP.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_master.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
int test_omp_master()
{
  /* Verifies the master construct: exactly one thread (thread 0) must run
     the master block.  Returns 1 on success, 0 on failure. */
  int master_count = 0;   /* how many threads entered the master block */
  int master_id = -1;     /* id of the thread that ran it */
  #pragma omp parallel
  {
    #pragma omp master
    {
      /* critical guards the counter in case master misbehaves and several
         threads enter */
      #pragma omp critical
      {
        master_count++;
      }
      master_id = omp_get_thread_num();
    } /* end of master */
  } /* end of parallel */
  return (master_count == 1) && (master_id == 0);
}
int main()
{
  /* Run the master-construct test REPETITIONS times; the exit code is the
     number of failing repetitions (0 == all passed). */
  int num_failed = 0;
  for (int rep = 0; rep < REPETITIONS; rep++) {
    if (!test_omp_master()) {
      num_failed++;
    }
  }
  return num_failed;
}
|
DRB090-static-local-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
For a variable declared in a scope inside an OpenMP construct:
* private if the variable has an automatic storage duration
* shared if the variable has a static storage duration.
Dependence pairs:
tmp@73:5 vs. tmp@73:5
tmp@73:5 vs. tmp@74:12
*/
#include<stdio.h>
int main(int argc, char* argv[])
{
  int i;
  int len=100;
  // variable-length arrays of fixed size 100
  int a[len], b[len];
  // Initialization: each iteration writes distinct elements, so this loop
  // is race-free.
  #pragma omp parallel for
  for (i=0;i<len;i++)
  {
    a[i]=i;
    b[i]=i;
  }
  /* static storage for a local variable */
  // NOTE(review): this region is the intentional subject of this
  // DataRaceBench case ("orig-yes" = labeled as racy).  Per the file header,
  // a variable with static storage duration declared inside the construct is
  // shared by default; the interaction with private(tmp) is what the
  // benchmark exercises.  Do not "fix" this region.
  #pragma omp parallel
  {
    static int tmp;
    #pragma omp for private(tmp)
    for (i=0;i<len;i++)
    {
      tmp = a[i]+i;
      a[i] = tmp;
    }
  }
  /* automatic storage for a local variable */
  // Here tmp has automatic storage duration, so it is private to each thread;
  // the explicit private(tmp) keeps the two regions symmetric.
  #pragma omp parallel
  {
    int tmp;
    #pragma omp for private(tmp)
    for (i=0;i<len;i++)
    {
      tmp = b[i]+i;
      b[i] = tmp;
    }
  }
  printf("a[50]=%d b[50]=%d\n", a[50], b[50]);
  return 0;
}
|
GB_binop__rdiv_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fp64)
// A*D function (colscale): GB (_AxD__rdiv_fp64)
// D*A function (rowscale): GB (_DxB__rdiv_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fp64)
// C=scalar+B GB (_bind1st__rdiv_fp64)
// C=scalar+B' GB (_bind1st_tran__rdiv_fp64)
// C=A+scalar GB (_bind2nd__rdiv_fp64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (bij / aij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y / x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FP64 || GxB_NO_RDIV_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C += A+B where all three matrices are dense.  Note: unlike the other
    // kernels in this file, there is no GB_DISABLE guard here.
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B where all three matrices are dense (no accumulator).
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // C += B: accumulate a sparse matrix into a dense matrix.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b: accumulate a scalar into a dense matrix.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (code-generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // C = A*D: column scale with diagonal matrix D.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C values are of type double (GB_CTYPE)
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    // C = D*B: row scale with diagonal matrix D.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C values are of type double (GB_CTYPE)
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces released by GB_FREE_WORKSPACE after the template runs
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta scalars are only unboxed when is_eWiseUnion is true
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult: C = A.*B (optionally masked) where C is sparse/hyper.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is 0 for RDIV, so only this branch is compiled.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // eWiseMult: C<M> = A.*B with M sparse/hyper, A and B bitmap/full.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult: C = A.*B (optionally masked) where C is bitmap.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (Bx [p] / x) for each entry present in B.
    double *Cx = (double *) Cx_output ;
    double *Bx = (double *) Bx_input ;
    double x = (*((double *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // process only entries reported present by GBB
        if (GBB (Bb, p))
        {
            double bij = GBX (Bx, p, false) ;
            Cx [p] = (bij / x) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (y / Ax [p]) for each entry present in A.
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // process only entries reported present by GBB
        if (GBB (Ab, p))
        {
            double aij = GBX (Ax, p, false) ;
            Cx [p] = (y / aij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij / x) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (x, A'): transpose A, applying cij = (aij / x) via GB_CAST_OP.
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y / aij) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (A', y): transpose A, applying cij = (y / aij) via GB_CAST_OP.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ngdsac_util.h | /*
Based on the DSAC++ and ESAC code.
https://github.com/vislearn/LessMore
https://github.com/vislearn/esac
Copyright (c) 2016, TU Dresden
Copyright (c) 2020, Heidelberg University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the TU Dresden, Heidelberg University nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TU DRESDEN OR HEIDELBERG UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <omp.h>
// makros for coloring console output
#define GREENTEXT(output) "\x1b[32;1m" << output << "\x1b[0m"
#define REDTEXT(output) "\x1b[31;1m" << output << "\x1b[0m"
#define BLUETEXT(output) "\x1b[34;1m" << output << "\x1b[0m"
#define YELLOWTEXT(output) "\x1b[33;1m" << output << "\x1b[0m"
#define EPS 0.00000001
#define PI 3.1415926
namespace ngdsac
{
/**
 * @brief Calculate original image positions of a scene coordinate prediction.
 * @param outW Width of the scene coordinate prediction.
 * @param outH Height of the scene coordinate prediction.
 * @param subSampling Sub-sampling of the scene coordinate prediction wrt. the input image.
 * @param shiftX Horizontal offset in case the input image has been shifted before scene coordinate prediction.
 * @param shiftY Vertical offset in case the input image has been shifted before scene coordinate prediction.
 * @return Matrix where each entry contains the original 2D image position.
 */
cv::Mat_<cv::Point2i> createSampling(
	unsigned outW, unsigned outH,
	int subSampling,
	int shiftX, int shiftY)
{
	cv::Mat_<cv::Point2i> pixelPositions(outH, outW);

	// each prediction cell maps back to the center of its sub-sampled patch
	const int halfPatch = subSampling / 2;

	#pragma omp parallel for
	for(unsigned col = 0; col < outW; col++)
	{
		for(unsigned row = 0; row < outH; row++)
		{
			pixelPositions(row, col) = cv::Point2i(
				col * subSampling + halfPatch - shiftX,
				row * subSampling + halfPatch - shiftY);
		}
	}

	return pixelPositions;
}
/**
 * @brief Wrapper for OpenCV solvePnP.
 * Properly handles empty pose inputs.
 * @param objPts List of 3D scene points.
 * @param imgPts List of corresponding 2D image points.
 * @param camMat Internal calibration matrix of the camera.
 * @param distCoeffs Distortion coefficients.
 * @param rot Camera rotation (input/output), axis-angle representation.
 * @param trans Camera translation (input/output).
 * @param extrinsicGuess Whether rot and trans already contain a pose estimate.
 * @param methodFlag OpenCV PnP method flag (e.g. cv::SOLVEPNP_P3P).
 * @return True if pose estimation succeeded; on failure rot and trans are
 *         reset to 3x1 zero vectors so callers never see garbage values.
 */
inline bool safeSolvePnP(
	const std::vector<cv::Point3f>& objPts,
	const std::vector<cv::Point2f>& imgPts,
	const cv::Mat& camMat,
	const cv::Mat& distCoeffs,
	cv::Mat& rot,
	cv::Mat& trans,
	bool extrinsicGuess,
	int methodFlag)
{
	// A default-constructed (empty) cv::Mat reports type() == 0 (CV_8U);
	// replace such uninitialized poses with zero vectors so solvePnP always
	// receives valid double-typed input, even with extrinsicGuess set.
	if(rot.type() == 0) rot = cv::Mat_<double>::zeros(1, 3);
	if(trans.type() == 0) trans= cv::Mat_<double>::zeros(1, 3);

	if(!cv::solvePnP(
		objPts,
		imgPts,
		camMat,
		distCoeffs,
		rot,
		trans,
		extrinsicGuess,
		methodFlag))
	{
		// PnP failed: return a well-defined zero pose
		rot = cv::Mat_<double>::zeros(3, 1);
		trans = cv::Mat_<double>::zeros(3, 1);
		return false;
	}

	return true;
}
/**
 * @brief Samples a set of RANSAC camera pose hypotheses biased by neural guidance.
 * @param sceneCoordinates Scene coordinate prediction (1x3xHxW).
 * @param neuralGuidance Sampling weights (1x1xHxW).
 * @param sampling Contains original image coordinate for each scene coordinate predicted.
 * @param camMat Camera calibration matrix.
 * @param ransacHypotheses RANSAC iterations, i.e. number of hypotheses to sample.
 * @param inlierThreshold RANSAC inlier threshold in px. NOTE(review): unused in this function.
 * @param hypotheses (output parameter) List of sampled pose hypotheses.
 * @param sampledPoints (output parameter) Corresponding minimal set for each hypotheses, scene coordinate indices.
 * @param imgPts (output parameter) Corresponding minimal set for each hypotheses, 2D image coordinates.
 * @param objPts (output parameter) Corresponding minimal set for each hypotheses, 3D scene coordinates.
 */
inline void sampleHypotheses(
	ngdsac::coord_t& sceneCoordinates,
	ngdsac::ng_t& neuralGuidance,
	const cv::Mat_<cv::Point2i>& sampling,
	const cv::Mat_<float>& camMat,
	int ransacHypotheses,
	float inlierThreshold,
	std::vector<ngdsac::pose_t>& hypotheses,
	std::vector<std::vector<cv::Point2i>>& sampledPoints,
	std::vector<std::vector<cv::Point2f>>& imgPts,
	std::vector<std::vector<cv::Point3f>>& objPts)
{
	int imH = sceneCoordinates.size(2);
	int imW = sceneCoordinates.size(3);

	// keep track of the points each hypothesis is sampled from
	sampledPoints.resize(ransacHypotheses);
	imgPts.resize(ransacHypotheses);
	objPts.resize(ransacHypotheses);
	hypotheses.resize(ransacHypotheses);

	// flatten the guidance map x-major (x outer, y inner), so a flat index
	// ptXY maps back to x = ptXY / imH, y = ptXY % imH below
	std::vector<float> wPts;
	wPts.reserve(imH*imW);

	for(int x = 0; x < imW; x++)
	for(int y = 0; y < imH; y++)
		wPts.push_back(neuralGuidance[0][0][y][x]);

	std::discrete_distribution<int> multinomialDist(wPts.begin(), wPts.end());

	// sample hypotheses
	// NOTE(review): multinomialDist is shared by all OpenMP threads while
	// std::discrete_distribution::operator() is non-const — confirm this is
	// race-free with the standard library in use (the RNG engines themselves
	// are per-thread via ThreadRand::generators[threadID]).
	#pragma omp parallel for
	for(unsigned h = 0; h < hypotheses.size(); h++)
	{
		int batchIdx = 0; // only batch size=1 supported atm

		unsigned threadID = omp_get_thread_num();

		std::vector<cv::Point2f> projections; // NOTE(review): declared but unused

		imgPts[h].clear();
		objPts[h].clear();
		sampledPoints[h].clear();

		// draw a minimal set of 4 correspondences (P3P below)
		for(int j = 0; j < 4; j++)
		{
			// 2D location in the subsampled image
			int ptXY = multinomialDist(ThreadRand::generators[threadID]);
			int x = ptXY / imH;
			int y = ptXY % imH;

			// 2D location in the original RGB image
			imgPts[h].push_back(sampling(y, x));

			// 3D object coordinate
			objPts[h].push_back(cv::Point3f(
				sceneCoordinates[batchIdx][0][y][x],
				sceneCoordinates[batchIdx][1][y][x],
				sceneCoordinates[batchIdx][2][y][x]));

			// 2D pixel location in the subsampled image
			sampledPoints[h].push_back(cv::Point2i(x, y));
		}

		// fit a pose to the minimal set; on failure the hypothesis keeps the
		// zero pose safeSolvePnP leaves behind (no re-sampling is attempted)
		if(!ngdsac::safeSolvePnP(
			objPts[h],
			imgPts[h],
			camMat,
			cv::Mat(),
			hypotheses[h].first,
			hypotheses[h].second,
			false,
			cv::SOLVEPNP_P3P))
		{
			continue;
		}
	}
}
/**
 * @brief Calculate soft inlier counts.
 * Each pixel contributes 1 - sigmoid(beta * (error - threshold)) to the
 * score of a hypothesis; the sum is normalized by image area and alpha.
 * @param reproErrs Image of reprojection error for each pose hypothesis.
 * @param inlierThreshold RANSAC inlier threshold.
 * @param inlierAlpha Alpha parameter for soft inlier counting.
 * @param inlierBeta Beta parameter for soft inlier counting.
 * @return List of soft inlier counts for each hypothesis.
 */
inline std::vector<double> getHypScores(
	const std::vector<cv::Mat_<float>>& reproErrs,
	float inlierThreshold,
	float inlierAlpha,
	float inlierBeta)
{
	std::vector<double> scores(reproErrs.size(), 0);

	// each hypothesis is fully independent: accumulate and normalize
	// its score in a single parallel pass
	#pragma omp parallel for
	for(unsigned h = 0; h < reproErrs.size(); h++)
	{
		const cv::Mat_<float>& errs = reproErrs[h];

		for(int col = 0; col < errs.cols; col++)
		for(int row = 0; row < errs.rows; row++)
		{
			// sigmoid of the thresholded error; close to 0 for inliers
			double softThreshold = inlierBeta * (errs(row, col) - inlierThreshold);
			softThreshold = 1 / (1+std::exp(-softThreshold));
			scores[h] += 1 - softThreshold;
		}

		// normalize by pixel count, scale by alpha
		scores[h] *= inlierAlpha / errs.cols / errs.rows;
	}

	return scores;
}
/**
 * @brief Calculate image of reprojection errors.
 * @param sceneCoordinates Scene coordinate prediction (1x3xHxW).
 * @param hyp Pose hypothesis to calculate the errors for.
 * @param sampling Contains original image coordinate for each scene coordinate predicted.
 * @param camMat Camera calibration matrix.
 * @param maxReproj Reprojection errors are clamped to this maximum value.
 * @param jacobeanHyp (output parameter) Jacobean matrix with derivatives of the
 *        reprojection errors wrt. the 6D pose (num pts x 6); only written when calcJ is true.
 * @param calcJ Whether to calculate the jacobean matrix or not.
 * @return Image of reprojection errors (same size as sampling).
 */
cv::Mat_<float> getReproErrs(
	ngdsac::coord_t& sceneCoordinates,
	const ngdsac::pose_t& hyp,
	const cv::Mat_<cv::Point2i>& sampling,
	const cv::Mat& camMat,
	float maxReproj,
	cv::Mat_<double>& jacobeanHyp,
	bool calcJ = false)
{
	int batchIdx = 0; // only batch size=1 supported atm

	cv::Mat_<float> reproErrs = cv::Mat_<float>::zeros(sampling.size());

	std::vector<cv::Point3f> points3D;    // predicted scene coordinates
	std::vector<cv::Point2f> projections; // scene coords projected under hyp
	std::vector<cv::Point2f> points2D;    // observed pixels in the original image
	std::vector<cv::Point2f> sources2D;   // (x, y) cell in the subsampled grid

	// collect 2D-3D correspondences
	for(int x = 0; x < sampling.cols; x++)
	for(int y = 0; y < sampling.rows; y++)
	{
		// get 2D location of the original RGB frame
		cv::Point2f pt2D(sampling(y, x).x, sampling(y, x).y);

		// get associated 3D object coordinate prediction
		points3D.push_back(cv::Point3f(
			sceneCoordinates[batchIdx][0][y][x],
			sceneCoordinates[batchIdx][1][y][x],
			sceneCoordinates[batchIdx][2][y][x]));
		points2D.push_back(pt2D);
		sources2D.push_back(cv::Point2f(x, y));
	}

	// nothing to project: return the all-zero error image
	if(points3D.empty()) return reproErrs;

	if(!calcJ)
	{
		// project object coordinate into the image using the given pose
		cv::projectPoints(
			points3D,
			hyp.first,
			hyp.second,
			camMat,
			cv::Mat(),
			projections);
	}
	else
	{
		// also request the projection jacobean; keep only the first 6
		// columns (derivatives wrt. rotation and translation)
		cv::Mat_<double> projectionsJ;
		cv::projectPoints(
			points3D,
			hyp.first,
			hyp.second,
			camMat,
			cv::Mat(),
			projections,
			projectionsJ);

		projectionsJ = projectionsJ.colRange(0, 6);

		//assemble the jacobean of the refinement residuals
		jacobeanHyp = cv::Mat_<double>::zeros(points2D.size(), 6);
		cv::Mat_<double> dNdP(1, 2); // derivative of the norm wrt. the projection
		cv::Mat_<double> dNdH(1, 6); // chained: derivative of the norm wrt. the pose

		for(unsigned ptIdx = 0; ptIdx < points2D.size(); ptIdx++)
		{
			// EPS floor prevents division by zero in the 1/err terms below
			double err = std::max(cv::norm(projections[ptIdx] - points2D[ptIdx]), EPS);
			if(err > maxReproj)
				continue; // clamped errors keep a zero jacobean row

			// derivative of norm
			dNdP(0, 0) = 1 / err * (projections[ptIdx].x - points2D[ptIdx].x);
			dNdP(0, 1) = 1 / err * (projections[ptIdx].y - points2D[ptIdx].y);

			dNdH = dNdP * projectionsJ.rowRange(2 * ptIdx, 2 * ptIdx + 2);
			dNdH.copyTo(jacobeanHyp.row(ptIdx));
		}
	}

	// measure reprojection errors, clamped to maxReproj
	for(unsigned p = 0; p < projections.size(); p++)
	{
		cv::Point2f curPt = points2D[p] - projections[p];
		float l = std::min((float) cv::norm(curPt), maxReproj);
		reproErrs(sources2D[p].y, sources2D[p].x) = l;
	}

	return reproErrs;
}
/**
 * @brief Refine a pose hypothesis by iteratively re-fitting it to all inliers.
 * @param sceneCoordinates Scene coordinate prediction (1x3xHxW).
 * @param reproErrs Original reprojection errors of the pose hypothesis, used to collect the first set of inliers.
 * @param sampling Contains original image coordinate for each scene coordinate predicted.
 * @param camMat Camera calibration matrix.
 * @param inlierThreshold RANSAC inlier threshold.
 * @param maxRefSteps Maximum refinement iterations (re-calculating inliers and refitting).
 * @param maxReproj Reprojection errors are clamped to this maximum value.
 * @param hypothesis (output parameter) Refined pose.
 * @param inlierMap (output parameter) 2D image indicating which scene coordinates are (final) inliers.
 */
inline void refineHyp(
	ngdsac::coord_t& sceneCoordinates,
	const cv::Mat_<float>& reproErrs,
	const cv::Mat_<cv::Point2i>& sampling,
	const cv::Mat_<float>& camMat,
	float inlierThreshold,
	unsigned maxRefSteps,
	float maxReproj,
	ngdsac::pose_t& hypothesis,
	cv::Mat_<int>& inlierMap)
{
	cv::Mat_<float> localReproErrs = reproErrs.clone();
	int batchIdx = 0; // only batch size=1 supported atm

	// refine as long as inlier count increases; starts at 4 because the
	// incoming hypothesis was fit to a minimal 4-point set
	unsigned bestInliers = 4;

	// refine current hypothesis
	for(unsigned rStep = 0; rStep < maxRefSteps; rStep++)
	{
		// collect inliers under the current reprojection error image
		std::vector<cv::Point2f> localImgPts;
		std::vector<cv::Point3f> localObjPts;
		cv::Mat_<int> localInlierMap = cv::Mat_<int>::zeros(localReproErrs.size());

		for(int x = 0; x < sampling.cols; x++)
		for(int y = 0; y < sampling.rows; y++)
		{
			if(localReproErrs(y, x) < inlierThreshold)
			{
				localImgPts.push_back(sampling(y, x));
				localObjPts.push_back(cv::Point3f(
					sceneCoordinates[batchIdx][0][y][x],
					sceneCoordinates[batchIdx][1][y][x],
					sceneCoordinates[batchIdx][2][y][x]));
				localInlierMap(y, x) = 1;
			}
		}

		if(localImgPts.size() <= bestInliers)
			break; // converged: inlier count no longer increases
		bestInliers = localImgPts.size();

		// recalculate pose on a copy, so a failed PnP below leaves the
		// last accepted hypothesis untouched
		ngdsac::pose_t hypUpdate;
		hypUpdate.first = hypothesis.first.clone();
		hypUpdate.second = hypothesis.second.clone();

		if(!ngdsac::safeSolvePnP(
			localObjPts,
			localImgPts,
			camMat,
			cv::Mat(),
			hypUpdate.first,
			hypUpdate.second,
			true,
			(localImgPts.size() > 4) ?
				cv::SOLVEPNP_ITERATIVE : // general refit with extrinsic guess
				cv::SOLVEPNP_P3P))       // minimal-set fallback
			break; //abort if PnP fails

		hypothesis = hypUpdate;
		inlierMap = localInlierMap;

		// recalculate pose errors for the next inlier collection round
		cv::Mat_<double> jacobeanDummy;
		localReproErrs = ngdsac::getReproErrs(
			sceneCoordinates,
			hypothesis,
			sampling,
			camMat,
			maxReproj,
			jacobeanDummy);
	}
}
/**
 * @brief Applies soft max to the given list of scores.
 * Subtracts the maximum score before exponentiation for numerical stability.
 * @param scores List of scores.
 * @return Soft max distribution (sums to 1).
 */
std::vector<double> softMax(const std::vector<double>& scores)
{
	// locate the maximum entry (stability shift)
	double shift = 0;
	for(unsigned idx = 0; idx < scores.size(); idx++)
	{
		if(idx == 0 || scores[idx] > shift)
			shift = scores[idx];
	}

	// exponentiate shifted scores, accumulating their sum
	std::vector<double> result(scores.size());
	double total = 0.0;
	for(unsigned idx = 0; idx < scores.size(); idx++)
	{
		result[idx] = std::exp(scores[idx] - shift);
		total += result[idx];
	}

	// normalize into a probability distribution
	for(unsigned idx = 0; idx < scores.size(); idx++)
		result[idx] /= total;

	return result;
}
/**
 * @brief Calculate the Shannon entropy of a discrete distribution.
 * @param dist Discrete distribution. Probability per entry, should sum to 1.
 * @return Shannon entropy in bits.
 */
double entropy(const std::vector<double>& dist)
{
	double result = 0;
	for(unsigned idx = 0; idx < dist.size(); idx++)
	{
		double p = dist[idx];
		// zero-probability entries contribute nothing (log2(0) is undefined)
		if(p > 0)
			result -= p * std::log2(p);
	}
	return result;
}
/**
 * @brief Sample a hypothesis index.
 * @param probs Selection probabilities.
 * @param training If false, do not sample, but take argmax.
 * @return Hypothesis index (argmax index when not training, or when no
 *         probability exceeds EPS).
 */
int draw(const std::vector<double>& probs, bool training)
{
	std::map<double, int> cumProb; // cumulative sum -> index
	double probSum = 0;
	double maxProb = -1;
	int maxIdx = 0; // fix: an index should be integral, not double

	for(unsigned idx = 0; idx < probs.size(); idx++)
	{
		// ignore negligible probabilities
		if(probs[idx] < EPS) continue;

		probSum += probs[idx];
		cumProb[probSum] = idx;

		// track the argmax as deterministic fallback
		if(maxProb < 0 || probs[idx] > maxProb)
		{
			maxProb = probs[idx];
			maxIdx = idx;
		}
	}

	if(training && !cumProb.empty())
	{
		// Fix: upper_bound returns end() when the drawn value equals the
		// final cumulative sum (drand(0, probSum) may return probSum) —
		// dereferencing that was undefined behavior. Fall back to the
		// last entry instead.
		std::map<double, int>::const_iterator it =
			cumProb.upper_bound(drand(0, probSum));
		if(it == cumProb.end())
			return cumProb.rbegin()->second;
		return it->second;
	}

	// not training (or every probability was below EPS): take the argmax
	return maxIdx;
}
/**
 * @brief Transform scene pose (OpenCV format) to camera transformation, related by inversion.
 * @param pose Scene pose in OpenCV format (i.e. axis-angle rotation and translation).
 * @return Camera transformation matrix (4x4, homogeneous).
 */
ngdsac::trans_t pose2trans(const ngdsac::pose_t& pose)
{
	ngdsac::trans_t rot, trans = ngdsac::trans_t::eye(4, 4);

	// axis-angle -> 3x3 rotation matrix
	cv::Rodrigues(pose.first, rot);

	// assemble the homogeneous matrix: rotation block + translation column
	rot.copyTo(trans.rowRange(0,3).colRange(0,3));
	trans(0, 3) = pose.second.at<double>(0, 0);
	trans(1, 3) = pose.second.at<double>(1, 0);
	trans(2, 3) = pose.second.at<double>(2, 0);

	return trans.inv(); // camera transformation is inverted scene pose
}
/**
 * @brief Transform camera transformation to scene pose (OpenCV format), related by inversion.
 * @param trans Camera transformation matrix (4x4, homogeneous).
 * @return Scene pose in OpenCV format (i.e. axis-angle rotation and translation).
 */
ngdsac::pose_t trans2pose(const ngdsac::trans_t& trans)
{
	// camera transformation is the inverted scene pose
	ngdsac::trans_t invTrans = trans.inv();

	ngdsac::pose_t pose;

	// rotation block -> axis-angle
	cv::Rodrigues(invTrans.colRange(0,3).rowRange(0,3), pose.first);

	// translation column -> 3x1 vector
	pose.second = cv::Mat_<double>(3, 1);
	pose.second.at<double>(0, 0) = invTrans(0, 3);
	pose.second.at<double>(1, 0) = invTrans(1, 3);
	pose.second.at<double>(2, 0) = invTrans(2, 3);

	return pose;
}
/**
 * @brief Calculate the average of all matrix entries.
 * Only entries whose absolute value exceeds EPS are counted.
 * @param mat Input matrix.
 * @return Average magnitude of the non-negligible entries.
 */
double getAvg(const cv::Mat_<double>& mat)
{
	double sum = 0;
	int nonZero = 0;

	for(int col = 0; col < mat.cols; col++)
	{
		for(int row = 0; row < mat.rows; row++)
		{
			double magnitude = std::abs(mat(row, col));
			if(magnitude > EPS)
			{
				sum += magnitude;
				nonZero++;
			}
		}
	}

	// EPS in the denominator keeps the result finite when no entry counted
	return sum / (EPS + nonZero);
}
/**
 * @brief Return the maximum absolute entry of the given matrix.
 * @param mat Input matrix.
 * @return Maximum magnitude (-1 for an empty matrix).
 */
double getMax(const cv::Mat_<double>& mat)
{
	double best = -1;

	for(int col = 0; col < mat.cols; col++)
	{
		for(int row = 0; row < mat.rows; row++)
		{
			double magnitude = std::abs(mat(row, col));
			if(best < 0 || magnitude > best)
				best = magnitude;
		}
	}

	return best;
}
/**
 * @brief Return the median of all entries of the given matrix.
 * Entries with absolute value below EPS are ignored; for an even count the
 * upper median is returned.
 * @param mat Input matrix.
 * @return Median magnitude, or 0 if no entry exceeds EPS.
 */
double getMed(const cv::Mat_<double>& mat)
{
	std::vector<double> vals;
	vals.reserve((size_t) mat.cols * mat.rows);

	for(int x = 0; x < mat.cols; x++)
	for(int y = 0; y < mat.rows; y++)
	{
		double entry = std::abs(mat(y, x));
		if(entry > EPS) vals.push_back(entry);
	}

	if(vals.empty())
		return 0;

	// only one order statistic is needed: nth_element is O(n) vs the
	// previous full std::sort at O(n log n), and yields the same value
	std::nth_element(vals.begin(), vals.begin() + vals.size() / 2, vals.end());
	return vals[vals.size() / 2];
}
}
|
GB_binop__plus_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__plus_uint8
// A.*B function (eWiseMult): GB_AemultB__plus_uint8
// A*D function (colscale): GB_AxD__plus_uint8
// D*A function (rowscale): GB_DxB__plus_uint8
// C+=B function (dense accum): GB_Cdense_accumB__plus_uint8
// C+=b function (dense accum): GB_Cdense_accumb__plus_uint8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_uint8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_uint8
// C=scalar+B GB_bind1st__plus_uint8
// C=scalar+B' GB_bind1st_tran__plus_uint8
// C=A+scalar GB_bind2nd__plus_uint8
// C=A'+scalar GB_bind2nd_tran__plus_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x + y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_UINT8 || GxB_NO_PLUS_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// The loop lives in the shared template; the GB_* macros defined at the top of
// this file specialize it for z = x + y on uint8_t.
// NOTE(review): unlike the other kernels in this file, there is no GB_DISABLE
// guard here — confirm callers never reach this when the op is compiled out.
void GB_Cdense_ewise3_accum__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of OpenMP threads to use
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Dense eWise3 without accumulator, specialized for PLUS/uint8.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out.
GrB_Info GB_Cdense_ewise3_noaccum__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// The kfirst/klast/pstart slice arrays partition B into ntasks parallel
// tasks (presumably produced by GB_ek_slice — see GB_ek_slice.h).
GrB_Info GB_Cdense_accumB__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Specialized for PLUS/uint8.  The scalar is passed as an untyped pointer and
// unboxed here before the template loop runs.
GrB_Info GB_Cdense_accumb__plus_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // points to a single uint8_t scalar
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // fix: the generated code returned GrB_SUCCESS both inside the block and
    // again here, leaving the second statement unreachable; keep one return.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// The template writes the scaled values directly into Cx, specialized for
// PLUS/uint8 via the macros at the top of this file.
GrB_Info GB_AxD__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,      // pattern: values of A unused
    const GrB_Matrix D, bool D_is_pattern,      // pattern: values of D unused
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Row-scaling counterpart of GB_AxD above, specialized for PLUS/uint8.
GrB_Info GB_DxB__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,      // pattern: values of D unused
    const GrB_Matrix B, bool B_is_pattern,      // pattern: values of B unused
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Element-wise add specialized for PLUS/uint8.  The shared template consumes
// the optional mask M (structural when Mask_struct) and the C_to_* index maps.
GrB_Info GB_AaddB__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Element-wise multiply pattern (intersection of A and B), but with the PLUS
// operator applied to the values, specialized for uint8.
GrB_Info GB_AemultB__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [k] = x + Bx [k] for all k, with the scalar bound as the first operand.
GrB_Info GB_bind1st__plus_uint8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // unbox the untyped arguments
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (x + Bx [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [k] = Ax [k] + y for all k, with the scalar bound as the second operand.
GrB_Info GB_bind2nd__plus_uint8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // unbox the untyped arguments
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (Ax [k] + y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint8_t aij = Ax [pA] ;         \
    Cx [pC] = (x + aij) ;           \
}

GrB_Info GB_bind1st_tran__plus_uint8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint8_t aij = Ax [pA] ;         \
    Cx [pC] = (aij + y) ;           \
}

GrB_Info GB_bind2nd_tran__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // unbox the scalar; the transpose loop comes from the shared template
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
residual_based_bossak_velocity_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
// Suneth Warnakulasuriya
//
#if !defined(KRATOS_RESIDUAL_BASED_BOSSAK_VELOCITY_SCHEME_H_INCLUDED)
#define KRATOS_RESIDUAL_BASED_BOSSAK_VELOCITY_SCHEME_H_INCLUDED
// System includes
#include <limits>
#include <vector>
// External includes
// Project includes
#include "custom_strategies/relaxed_dof_updater.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/schemes/scheme.h"
#include "utilities/time_discretization.h"
namespace Kratos
{
///@name Kratos Classes
///@{
/// A scheme for steady and dynamic equations, using Bossak time integration.
/**
* It can be used for either first- or second-order time derivatives. Elements
* and conditions must provide a specialization of SchemeExtension via
* their data value container, which allows the scheme to operate independently
* of the variable arrangements in the element or condition.
*/
template <class TSparseSpace, class TDenseSpace>
class ResidualBasedBossakVelocityScheme : public Scheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedBossakVelocityScheme);
using BaseType = Scheme<TSparseSpace, TDenseSpace>;
using SystemMatrixType = typename BaseType::TSystemMatrixType;
using SystemVectorType = typename BaseType::TSystemVectorType;
using LocalSystemVectorType = typename BaseType::LocalSystemVectorType;
using LocalSystemMatrixType = typename BaseType::LocalSystemMatrixType;
using DofsArrayType = typename BaseType::DofsArrayType;
using NodeType = ModelPart::NodeType;
using IndexType = std::size_t;
///@}
///@name Life Cycle
///@{
    /// Constructor.
    /**
     * @param AlphaBossak Bossak alpha used to compute the time integration constants.
     * @param RelaxationFactor Relaxation applied to the DOF update.
     * @param rDisplacementVariables Displacement variables to update (may be empty).
     * @param rVelocityVariables Velocity variables.
     * @param rAccelerationVariables Acceleration variables to update (may be empty).
     * @param rDisplacementComponentVariables Component-wise displacement variables.
     * @param rVelocityComponentVariables Component-wise velocity variables.
     * @param rAccelerationComponentVariables Component-wise acceleration variables.
     */
    ResidualBasedBossakVelocityScheme(
        const double AlphaBossak,
        const double RelaxationFactor,
        const std::vector<Variable<double> const*> rDisplacementVariables,
        const std::vector<Variable<double> const*> rVelocityVariables,
        const std::vector<Variable<double> const*> rAccelerationVariables,
        const std::vector<Variable<double> const*> rDisplacementComponentVariables,
        const std::vector<Variable<double> const*> rVelocityComponentVariables,
        const std::vector<Variable<double> const*> rAccelerationComponentVariables)
        : mAlphaBossak(AlphaBossak),
          // accelerations/displacements are only updated when the caller
          // registered at least one corresponding variable
          mUpdateAcceleration(rAccelerationVariables.size() > 0 ||
                              rAccelerationComponentVariables.size() > 0),
          mUpdateDisplacement(rDisplacementVariables.size() > 0 ||
                              rDisplacementComponentVariables.size() > 0),
          mRelaxationFactor(RelaxationFactor),
          mDisplacementVariables(rDisplacementVariables),
          mVelocityVariables(rVelocityVariables),
          mAccelerationVariables(rAccelerationVariables),
          mDisplacementComponentVariables(rDisplacementComponentVariables),
          mVelocityComponentVariables(rVelocityComponentVariables),
          mAccelerationComponentVariables(rAccelerationComponentVariables)
    {
        KRATOS_INFO("ResidualBasedBossakVelocityScheme")
            << " Using bossak velocity scheme with alpha_bossak = " << std::scientific
            << mAlphaBossak << " [UpdateAcceleration: " << mUpdateAcceleration
            << ", UpdateDisplacement: " << mUpdateDisplacement << "]\n";

        // Allocate auxiliary memory: one scratch matrix/vector per OpenMP
        // thread so element/condition loops can run without sharing workspace.
        const int num_threads = OpenMPUtils::GetNumThreads();
        mMassMatrix.resize(num_threads);
        mDampingMatrix.resize(num_threads);
        mValuesVector.resize(num_threads);
        mSecondDerivativeValuesVector.resize(num_threads);
        mSecondDerivativeValuesVectorOld.resize(num_threads);
    }
    /// Destructor (defaulted: members release their own resources).
    ~ResidualBasedBossakVelocityScheme() override = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
    /// Prepares a new solution step: validates DELTA_TIME and recomputes the
    /// Bossak integration constants from it.
    void InitializeSolutionStep(ModelPart& rModelPart,
                                SystemMatrixType& rA,
                                SystemVectorType& rDx,
                                SystemVectorType& rb) override
    {
        KRATOS_TRY;

        BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);

        const double delta_time = rModelPart.GetProcessInfo()[DELTA_TIME];

        // the Bossak constants depend on delta_time, hence a zero step is fatal
        KRATOS_ERROR_IF(delta_time < std::numeric_limits<double>::epsilon())
            << "detected delta_time = 0 in the Bossak Scheme ... "
               "check if the time step is created correctly for "
               "the current model part.";

        // recompute the constants each step since delta_time may change
        ResidualBasedBossakVelocityScheme::CalculateBossakConstants(
            mBossak, mAlphaBossak, delta_time);

        // NOTE(review): the critical section only has an effect when this is
        // entered from inside a parallel region — confirm the calling context.
        #pragma omp critical
        {
            rModelPart.GetProcessInfo()[BOSSAK_ALPHA] = mBossak.Alpha;
        }

        KRATOS_CATCH("");
    }
    /// Applies the solution increment rDx to the DOFs (scaled by the
    /// relaxation factor) and then updates the dependent time-scheme variables.
    void Update(ModelPart& rModelPart,
                DofsArrayType& rDofSet,
                SystemMatrixType& rA,
                SystemVectorType& rDx,
                SystemVectorType& rb) override
    {
        KRATOS_TRY;

        // relaxed DOF update: dof += mRelaxationFactor * dx
        mpDofUpdater->UpdateDofs(rDofSet, rDx, mRelaxationFactor);

        // presumably recomputes accelerations/displacements from the updated
        // velocities (helper defined elsewhere in this class)
        this->UpdateTimeSchemeVariables(rModelPart);

        KRATOS_CATCH("");
    }
    /// Assembles the local LHS and RHS of an element, adding dynamic
    /// (mass/damping) contributions where enabled.
    void CalculateSystemContributions(Element::Pointer pCurrentElement,
                                      LocalSystemMatrixType& rLHS_Contribution,
                                      LocalSystemVectorType& rRHS_Contribution,
                                      Element::EquationIdVectorType& rEquationId,
                                      ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY;

        // per-thread index into the scratch matrices allocated in the constructor
        const int k = OpenMPUtils::ThisThread();

        (pCurrentElement)->InitializeNonLinearIteration(rCurrentProcessInfo);
        (pCurrentElement)->CalculateLocalSystem(rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
        (pCurrentElement)->CalculateLocalVelocityContribution(mDampingMatrix[k], rRHS_Contribution, rCurrentProcessInfo);

        if (mUpdateAcceleration)
        {
            (pCurrentElement)->CalculateMassMatrix(mMassMatrix[k], rCurrentProcessInfo);
            AddDynamicsToRHS(pCurrentElement, rRHS_Contribution, mDampingMatrix[k],
                             mMassMatrix[k], rCurrentProcessInfo);
        }

        // NOTE(review): mMassMatrix[k] is passed here even when
        // mUpdateAcceleration is false (so it was not recomputed above) —
        // confirm AddDynamicsToLHS ignores it in that case.
        AddDynamicsToLHS(rLHS_Contribution, mDampingMatrix[k], mMassMatrix[k],
                         rCurrentProcessInfo);

        (pCurrentElement)->EquationIdVector(rEquationId, rCurrentProcessInfo);

        KRATOS_CATCH("");
    }
    /// Assembles only the local RHS of an element, adding the dynamic
    /// contribution when acceleration updates are enabled.
    void Calculate_RHS_Contribution(Element::Pointer pCurrentElement,
                                    LocalSystemVectorType& rRHS_Contribution,
                                    Element::EquationIdVectorType& rEquationId,
                                    ProcessInfo& rCurrentProcessInfo) override
    {
        // per-thread index into the scratch matrices
        const int k = OpenMPUtils::ThisThread();

        // Initializing the non linear iteration for the current element
        (pCurrentElement)->InitializeNonLinearIteration(rCurrentProcessInfo);

        // basic operations for the element considered
        (pCurrentElement)->CalculateRightHandSide(rRHS_Contribution, rCurrentProcessInfo);
        (pCurrentElement)->CalculateLocalVelocityContribution(mDampingMatrix[k], rRHS_Contribution, rCurrentProcessInfo);
        (pCurrentElement)->EquationIdVector(rEquationId, rCurrentProcessInfo);

        // adding the dynamic contributions (static is already included)
        if (mUpdateAcceleration)
        {
            (pCurrentElement)->CalculateMassMatrix(mMassMatrix[k], rCurrentProcessInfo);
            AddDynamicsToRHS(pCurrentElement, rRHS_Contribution, mDampingMatrix[k],
                             mMassMatrix[k], rCurrentProcessInfo);
        }
    }
/// Assemble a condition's local system (LHS and RHS) including dynamic terms.
/** Mirrors CalculateSystemContributions for conditions. */
void Condition_CalculateSystemContributions(Condition::Pointer pCurrentCondition,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Condition::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY
// per-thread index into the scratch mass/damping matrix pools
const int k = OpenMPUtils::ThisThread();
(pCurrentCondition)->InitializeNonLinearIteration(rCurrentProcessInfo);
(pCurrentCondition)->CalculateLocalSystem(rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
(pCurrentCondition)->CalculateLocalVelocityContribution(mDampingMatrix[k], rRHS_Contribution, rCurrentProcessInfo);
(pCurrentCondition)->EquationIdVector(rEquationId, rCurrentProcessInfo);
if (mUpdateAcceleration)
{
// inertial (mass) terms are only assembled when accelerations are updated
(pCurrentCondition)->CalculateMassMatrix(mMassMatrix[k], rCurrentProcessInfo);
AddDynamicsToRHS(pCurrentCondition, rRHS_Contribution,
mDampingMatrix[k], mMassMatrix[k], rCurrentProcessInfo);
}
AddDynamicsToLHS(rLHS_Contribution, mDampingMatrix[k], mMassMatrix[k],
rCurrentProcessInfo);
KRATOS_CATCH("")
}
/// Assemble only a condition's local RHS vector, including dynamic terms.
/** Mirrors Calculate_RHS_Contribution for conditions. */
void Condition_Calculate_RHS_Contribution(Condition::Pointer pCurrentCondition,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
// per-thread index into the scratch mass/damping matrix pools
const int k = OpenMPUtils::ThisThread();
(pCurrentCondition)->InitializeNonLinearIteration(rCurrentProcessInfo);
(pCurrentCondition)->CalculateRightHandSide(rRHS_Contribution, rCurrentProcessInfo);
(pCurrentCondition)->CalculateLocalVelocityContribution(mDampingMatrix[k], rRHS_Contribution, rCurrentProcessInfo);
(pCurrentCondition)->EquationIdVector(rEquationId, rCurrentProcessInfo);
// adding the dynamic contributions (static is already included)
if (mUpdateAcceleration)
{
(pCurrentCondition)->CalculateMassMatrix(mMassMatrix[k], rCurrentProcessInfo);
AddDynamicsToRHS(pCurrentCondition, rRHS_Contribution,
mDampingMatrix[k], mMassMatrix[k], rCurrentProcessInfo);
}
KRATOS_CATCH("");
}
/// Release the internal storage held by the dof updater.
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
// Scheme identifier, also used by PrintInfo/PrintData below.
return "ResidualBasedBossakVelocityScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info(); // prints only the scheme name
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info(); // no additional data beyond the scheme name
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
// Precomputed Bossak/Newmark time-integration constants; the values are
// filled in by CalculateBossakConstants (dt = time step size).
struct BossakConstants
{
double Alpha; // Bossak alpha_m
double Gamma; // Newmark gamma
double Beta;  // Newmark beta
double C0; // (1 - Alpha) / (Gamma * dt)
double C1; // dt / (Beta * Gamma)
double C2; // 1 / (Gamma * dt)
double C3; // (1 - Gamma) / Gamma
double C4; // dt^2 * (1 - 2*Beta) / 2
double C5; // dt^2 * Beta
double C6; // dt
};
///@}
///@name Protected member Variables
///@{
std::vector<LocalSystemVectorType> mSecondDerivativeValuesVectorOld;
std::vector<LocalSystemVectorType> mSecondDerivativeValuesVector;
std::vector<LocalSystemVectorType> mValuesVector;
std::vector<LocalSystemMatrixType> mMassMatrix;
std::vector<LocalSystemMatrixType> mDampingMatrix;
const double mAlphaBossak;
bool mUpdateAcceleration;
bool mUpdateDisplacement;
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
//****************************************************************************
/**
Kdyn = am*M + D + a1*K
*/
/// Build the dynamic stiffness Kdyn = C0*M + D + C1*K in place.
/**
 * @param rLHS_Contribution on input the static stiffness K; on output Kdyn
 * @param rDampingMatrix elemental damping matrix D (may be empty)
 * @param rMassMatrix elemental mass matrix M (may be empty)
 * @param CurrentProcessInfo process info (unused here)
 */
void AddDynamicsToLHS(LocalSystemMatrixType& rLHS_Contribution,
LocalSystemMatrixType& rDampingMatrix,
LocalSystemMatrixType& rMassMatrix,
ProcessInfo& CurrentProcessInfo)
{
// multiplying by the time scheme factor C1
rLHS_Contribution *= mBossak.C1;
// adding mass contribution to the dynamic stiffness
if (rMassMatrix.size1() != 0 && mUpdateAcceleration) // if M matrix declared
{
noalias(rLHS_Contribution) += mBossak.C0 * rMassMatrix;
}
// adding damping contribution
if (rDampingMatrix.size1() != 0) // if D matrix declared
{
noalias(rLHS_Contribution) += rDampingMatrix;
}
}
//****************************************************************************
/// Add Bossak contributions from the inertial term to the RHS vector.
/** This essentially performs bdyn = b - M*acc for the current element.
* Note that viscous/pressure contributions to the RHS are expected to be added by the element itself.
* @param[in] rCurrentElement The fluid element we are assembling.
* @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added.
* @param[in] rDampingMatrix The elemental velocity/pressure LHS matrix.
* @param[in] rMassMatrix The elemental acceleration LHS matrix.
* @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart.
*/
void AddDynamicsToRHS(Element::Pointer rCurrentElement,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rDampingMatrix,
LocalSystemMatrixType& rMassMatrix,
ProcessInfo& rCurrentProcessInfo)
{
// adding inertia contribution
if (rMassMatrix.size1() != 0)
{
const int k = OpenMPUtils::ThisThread();
// Bossak-weighted acceleration: acc = (1 - alpha)*a_{n+1} + alpha*a_n
rCurrentElement->GetSecondDerivativesVector(
mSecondDerivativeValuesVector[k], 0);
(mSecondDerivativeValuesVector[k]) *= (1.00 - mBossak.Alpha);
rCurrentElement->GetSecondDerivativesVector(
mSecondDerivativeValuesVectorOld[k], 1);
noalias(mSecondDerivativeValuesVector[k]) +=
mBossak.Alpha * mSecondDerivativeValuesVectorOld[k];
// b -= M * acc
noalias(rRHS_Contribution) -=
prod(rMassMatrix, mSecondDerivativeValuesVector[k]);
}
}
/// Add Bossak contributions from the inertial term to the RHS vector.
/** This essentially performs bdyn = b - M*acc for the current condition.
* Note that viscous/pressure contributions to the RHS are expected to be added by the element condition.
* @param[in] rCurrentCondition The fluid condition we are assembling.
* @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added.
* @param[in] rDampingMatrix The elemental velocity/pressure LHS matrix.
* @param[in] rMassMatrix The elemental acceleration LHS matrix.
* @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart.
*/
void AddDynamicsToRHS(Condition::Pointer rCurrentCondition,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rDampingMatrix,
LocalSystemMatrixType& rMassMatrix,
ProcessInfo& rCurrentProcessInfo)
{
// adding inertia contribution
if (rMassMatrix.size1() != 0)
{
const int k = OpenMPUtils::ThisThread();
// Bossak-weighted acceleration: acc = (1 - alpha)*a_{n+1} + alpha*a_n
rCurrentCondition->GetSecondDerivativesVector(
mSecondDerivativeValuesVector[k], 0);
(mSecondDerivativeValuesVector[k]) *= (1.00 - mBossak.Alpha);
rCurrentCondition->GetSecondDerivativesVector(
mSecondDerivativeValuesVectorOld[k], 1);
noalias(mSecondDerivativeValuesVector[k]) +=
mBossak.Alpha * mSecondDerivativeValuesVectorOld[k];
// b -= M * acc
noalias(rRHS_Contribution) -=
prod(rMassMatrix, mSecondDerivativeValuesVector[k]);
}
}
/// Recompute accelerations and displacements from the updated velocities.
/** Applies the scalar Bossak update formulas to every registered scalar and
 * component-wise variable pair on the model part's nodes. */
void UpdateTimeSchemeVariables(ModelPart& rModelPart)
{
KRATOS_TRY;
UpdateAcceleration<Variable<double>>(rModelPart, mVelocityVariables,
mAccelerationVariables);
UpdateAcceleration<Variable<double>>(
rModelPart, mVelocityComponentVariables, mAccelerationComponentVariables);
UpdateDisplacement<Variable<double>>(rModelPart, mDisplacementVariables,
mVelocityVariables, mAccelerationVariables);
UpdateDisplacement<Variable<double>>(
rModelPart, mDisplacementComponentVariables,
mVelocityComponentVariables, mAccelerationComponentVariables);
KRATOS_CATCH("");
}
/// Bossak acceleration update: a_{n+1} = C2*(v_{n+1} - v_n) - C3*a_n.
void UpdateAcceleration(double& rCurrentAcceleration,
const double CurrentVelocity,
const double OldVelocity,
const double OldAcceleration) const
{
const double velocity_increment = CurrentVelocity - OldVelocity;
rCurrentAcceleration =
mBossak.C2 * velocity_increment - mBossak.C3 * OldAcceleration;
}
/// Newmark displacement update: d_{n+1} = d_n + C6*v_n + C4*a_n + C5*a_{n+1}.
void UpdateDisplacement(double& rCurrentDisplacement,
const double OldDisplacement,
const double OldVelocity,
const double CurrentAcceleration,
const double OldAcceleration) const
{
// accumulate in the same order as the closed-form expression
double updated_displacement = OldDisplacement;
updated_displacement += mBossak.C6 * OldVelocity;
updated_displacement += mBossak.C4 * OldAcceleration;
updated_displacement += mBossak.C5 * CurrentAcceleration;
rCurrentDisplacement = updated_displacement;
}
/// Fill rBossakConstants from the Bossak alpha and the time step size.
/** Newmark beta/gamma are fixed to the standard values 0.25 and 0.5. */
static void CalculateBossakConstants(BossakConstants& rBossakConstants,
const double Alpha,
const double DeltaTime)
{
TimeDiscretization::Bossak bossak(Alpha, 0.25, 0.5);
const double alpha = bossak.GetAlphaM();
const double gamma = bossak.GetGamma();
const double beta = bossak.GetBeta();
rBossakConstants.Alpha = alpha;
rBossakConstants.Gamma = gamma;
rBossakConstants.Beta = beta;
rBossakConstants.C0 = (1.0 - alpha) / (gamma * DeltaTime);
rBossakConstants.C1 = DeltaTime / (beta * gamma);
rBossakConstants.C2 = 1.0 / (gamma * DeltaTime);
rBossakConstants.C3 = (1.0 - gamma) / gamma;
rBossakConstants.C4 = std::pow(DeltaTime, 2) * (-2.0 * beta + 1.0) / 2.0;
rBossakConstants.C5 = std::pow(DeltaTime, 2) * beta;
rBossakConstants.C6 = DeltaTime;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
using DofUpdaterType = RelaxedDofUpdater<TSparseSpace>;
using DofUpdaterPointerType = typename DofUpdaterType::UniquePointer;
DofUpdaterPointerType mpDofUpdater = Kratos::make_unique<DofUpdaterType>();
double mRelaxationFactor;
const std::vector<Variable<double> const*> mDisplacementVariables;
const std::vector<Variable<double> const*> mVelocityVariables;
const std::vector<Variable<double> const*> mAccelerationVariables;
const std::vector<Variable<double> const*> mDisplacementComponentVariables;
const std::vector<Variable<double> const*> mVelocityComponentVariables;
const std::vector<Variable<double> const*> mAccelerationComponentVariables;
BossakConstants mBossak;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// class to hold all the derivatives for updated target variable
/// Nodal loop applying the Bossak acceleration update for each variable pair.
/** No-op unless mUpdateAcceleration is set. pVelocityVariables[i] is assumed
 * to correspond pairwise with pAccelerationVariables[i] — TODO confirm this
 * invariant is enforced by the constructor. */
template <class TVariableType>
void UpdateAcceleration(ModelPart& rModelPart,
const std::vector<TVariableType const*>& pVelocityVariables,
const std::vector<TVariableType const*>& pAccelerationVariables)
{
if (!mUpdateAcceleration)
return;
const int number_of_nodes = rModelPart.NumberOfNodes();
#pragma omp parallel for
for (int i_node = 0; i_node < number_of_nodes; ++i_node)
{
NodeType& r_node = *(rModelPart.NodesBegin() + i_node);
for (IndexType i_var = 0; i_var < pAccelerationVariables.size(); ++i_var)
{
double& r_current_acceleration =
r_node.FastGetSolutionStepValue(*pAccelerationVariables[i_var]);
// buffer index 1 = previous time step value
const double old_acceleration = r_node.FastGetSolutionStepValue(
*pAccelerationVariables[i_var], 1);
const double current_velocity =
r_node.FastGetSolutionStepValue(*pVelocityVariables[i_var]);
const double old_velocity =
r_node.FastGetSolutionStepValue(*pVelocityVariables[i_var], 1);
UpdateAcceleration(r_current_acceleration, current_velocity,
old_velocity, old_acceleration);
}
}
}
/// Nodal loop applying the Newmark displacement update for each variable set.
/** No-op unless mUpdateDisplacement is set. The three variable lists are
 * assumed to correspond index-wise — TODO confirm this invariant. */
template <class TVariableType>
void UpdateDisplacement(ModelPart& rModelPart,
const std::vector<TVariableType const*>& pDisplacementVariables,
const std::vector<TVariableType const*>& pVelocityVariables,
const std::vector<TVariableType const*>& pAccelerationVariables)
{
if (!mUpdateDisplacement)
return;
const int number_of_nodes = rModelPart.NumberOfNodes();
#pragma omp parallel for
for (int i_node = 0; i_node < number_of_nodes; ++i_node)
{
NodeType& r_node = *(rModelPart.NodesBegin() + i_node);
for (IndexType i_var = 0; i_var < pDisplacementVariables.size(); ++i_var)
{
double& r_current_displacement =
r_node.FastGetSolutionStepValue(*pDisplacementVariables[i_var]);
// buffer index 1 = previous time step value
const double old_displacement = r_node.FastGetSolutionStepValue(
*pDisplacementVariables[i_var], 1);
const double current_acceleration =
r_node.FastGetSolutionStepValue(*pAccelerationVariables[i_var]);
const double old_acceleration = r_node.FastGetSolutionStepValue(
*pAccelerationVariables[i_var], 1);
const double old_velocity =
r_node.FastGetSolutionStepValue(*pVelocityVariables[i_var], 1);
UpdateDisplacement(r_current_displacement, old_displacement, old_velocity,
current_acceleration, old_acceleration);
}
}
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBossakVelocityScheme */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BOSSAK_VELOCITY_SCHEME_H_INCLUDED defined */
|
burgers1d.c | #ifndef TAPENADE
#include <math.h>
#else
double fmax(double a, double b) {
if(a>b) return a;
else return b;
}
double fmin(double a, double b) {
if(a<b) return a;
else return b;
}
#endif
#define Max(x,y) fmax(x,y)
#define Min(x,y) fmin(x,y)
#define Heaviside(x) ((x>=0)?1.0:0.0)
#define u(x) u[x]
#define u_1(x) u_1[x]
void burgers1d(double* u, double* u_1, double D, double C, int n) {
int i;
#pragma omp parallel for private(i)
for ( i=1; i<=n - 2; i++ ) {
u(i) += -C*((-u_1(i) + u_1(i + 1))*Min(0, u_1(i)) + (u_1(i) - u_1(i - 1))*Max(0, u_1(i))) + D*(-2.0*u_1(i) + u_1(i - 1) + u_1(i + 1)) + u_1(i);
}
}
|
map-3.c | struct S { int i : 1; int j : 4; long long k : 25; };
void bar (struct S, int);
#pragma omp declare target to (bar)
/* Negative compile test: mapping a whole struct (a) is fine, but naming a
   bit-field member in an OpenMP map clause must be rejected; the dg-error
   directives below pin the expected diagnostics. Code must stay as-is. */
void
foo (struct S a, struct S b, struct S c, struct S d)
{
#pragma omp target map (a)
bar (a, 0);
#pragma omp target map (a) map (b.i) /* { dg-error "bit-field .b.\(S::\|\)i. in .map. clause" } */
bar (a, b.i);
#pragma omp target map (a) map (b.j) /* { dg-error "bit-field .b.\(S::\|\)j. in .map. clause" } */
bar (a, b.j);
#pragma omp target map (a) map (b.k) /* { dg-error "bit-field .b.\(S::\|\)k. in .map. clause" } */
bar (a, b.k);
#pragma omp target data map (a) map (b.i) /* { dg-error "bit-field .b.\(S::\|\)i. in .map. clause" } */
{
#pragma omp target enter data map (alloc: a) map (to: c.j) /* { dg-error "bit-field .c.\(S::\|\)j. in .map. clause" } */
#pragma omp target exit data map (release: a) map (from: d.k) /* { dg-error "bit-field .d.\(S::\|\)k. in .map. clause" } */
}
}
|
kmeans_h2o4gpu.h | /*!
* Copyright 2017-2018 H2O.ai, Inc.
* License Apache License Version 2.0 (see LICENSE for details)
*/
#pragma once
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include "kmeans_labels.h"
#include "kmeans_centroids.h"
/// Device functor: assign record `idx` to its nearest centroid and count it.
template<typename T>
struct count_functor {
T* pairwise_distances_ptr; // distances, centroid i at offset idx + i*rows_per_run
int* counts_ptr; // per-centroid counters, length k (device memory)
int k; // number of centroids
int rows_per_run; // stride between consecutive centroids' distance columns
count_functor(T* _pairwise_distances_ptr, int* _counts_ptr, int _k, int _rows_per_run) {
pairwise_distances_ptr = _pairwise_distances_ptr;
counts_ptr = _counts_ptr;
k = _k;
rows_per_run = _rows_per_run;
}
// Linear scan over the k distances of record idx; ties keep the lowest index.
__device__
void operator()(int idx) const {
int closest_centroid_idx = 0;
T best_distance = pairwise_distances_ptr[idx];
// FIXME potentially slow due to striding
for (int i = 1; i < k; i++) {
T distance = pairwise_distances_ptr[idx + i * rows_per_run];
if (distance < best_distance) {
best_distance = distance;
closest_centroid_idx = i;
}
}
// concurrent updates from many threads land on the same counter
atomicAdd(&counts_ptr[closest_centroid_idx], 1);
}
};
/**
* Calculates closest centroid for each record and counts how many points are assigned to each centroid.
* @tparam T
* @param verbose
* @param num_gpu
* @param rows_per_gpu
* @param cols
* @param data
* @param data_dots
* @param centroids
* @param weights
*/
/**
 * Calculates the closest centroid for each record and accumulates, per
 * centroid, how many points are assigned to it across all GPUs.
 * @tparam T numeric element type
 * @param verbose verbosity level forwarded to the distance kernel
 * @param num_gpu number of GPUs (one OpenMP thread per GPU)
 * @param rows_per_gpu number of records held on each GPU
 * @param cols number of features per record
 * @param data per-GPU device data
 * @param data_dots per-GPU precomputed data dot products
 * @param centroids host copy of the centroids (length k*cols)
 * @param weights output: per-centroid point counts, summed over all GPUs
 */
template<typename T>
void count_pts_per_centroid(
int verbose,
int num_gpu, int rows_per_gpu, int cols,
thrust::device_vector<T> **data,
thrust::device_vector<T> **data_dots,
thrust::host_vector<T> centroids,
thrust::host_vector<T> &weights
) {
int k = centroids.size() / cols;
#pragma omp parallel for
for (int i = 0; i < num_gpu; i++) {
thrust::host_vector<int> weights_tmp(weights.size());
CUDACHECK(cudaSetDevice(i));
thrust::device_vector<T> centroid_dots(k);
thrust::device_vector<T> d_centroids = centroids;
thrust::device_vector<int> counts(k);
kmeans::detail::batch_calculate_distances(verbose, 0, rows_per_gpu, cols, k,
*data[i], d_centroids, *data_dots[i], centroid_dots,
[&](int rows_per_run, size_t offset, thrust::device_vector<T> &pairwise_distances) {
auto counting = thrust::make_counting_iterator(0);
auto counts_ptr = thrust::raw_pointer_cast(counts.data());
auto pairwise_distances_ptr = thrust::raw_pointer_cast(pairwise_distances.data());
thrust::for_each(counting,
counting + rows_per_run,
count_functor<T>(pairwise_distances_ptr, counts_ptr, k, rows_per_run)
);
}
);
kmeans::detail::memcpy(weights_tmp, counts);
kmeans::detail::streamsync(i);
// Fix: serialize the reduction into the shared `weights` vector. Without
// the critical section every OpenMP thread (one per GPU) performs
// `weights[p] +=` concurrently, which is a data race on the host vector.
#pragma omp critical
{
for (int p = 0; p < k; p++) {
weights[p] += weights_tmp[p];
}
}
}
}
sections-1.c | /* { dg-do compile } */
extern void bar(int);
/* Worksharing sections: the statement before the first `#pragma omp section`
   forms an implicit first section; `nowait` drops the closing barrier. */
void f1(void)
{
#pragma omp sections nowait
{
bar (1);
#pragma omp section
bar (2);
#pragma omp section
bar (3);
#pragma omp section
bar (4);
#pragma omp section
bar (5);
}
}
/* Worksharing sections where every section is explicit and the first one is
   a compound statement; implicit barrier at the end (no nowait). */
void f2(void)
{
#pragma omp sections
{
#pragma omp section
{
bar (1);
bar (1);
}
#pragma omp section
bar (2);
#pragma omp section
bar (3);
#pragma omp section
bar (4);
#pragma omp section
bar (5);
}
}
|
GB_subassign_04.c | //------------------------------------------------------------------------------
// GB_subassign_04: C(I,J) += A ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 04: C(I,J) += A ; using S
// M: NULL
// Mask_comp: false
// C_replace: false
// accum: present
// A: matrix
// S: constructed
// C: not bitmap: use GB_bitmap_assign instead
// A: any sparsity structure.
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_04
(
GrB_Matrix C,                   // matrix being assigned into (must not be bitmap)
// input:
const GrB_Index *I,             // row index list, interpreted per Ikind
const int64_t ni,               // length of the I list
const int64_t nI,               // number of rows I expands to
const int Ikind,                // kind of I (list / range / colon forms)
const int64_t Icolon [3],       // colon description when Ikind is a colon
const GrB_Index *J,             // column index list, interpreted per Jkind
const int64_t nj,               // length of the J list
const int64_t nJ,               // number of columns J expands to
const int Jkind,                // kind of J (list / range / colon forms)
const int64_t Jcolon [3],       // colon description when Jkind is a colon
const GrB_BinaryOp accum,       // accumulator: C(I,J) = accum (C(I,J), A)
const GrB_Matrix A,             // right-hand-side matrix
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ;
ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_MATRIX_WAIT_IF_JUMBLED (A) ;
GB_GET_C ; // C must not be bitmap
GB_GET_A ;
GB_GET_S ;
GB_GET_ACCUM ;
//--------------------------------------------------------------------------
// Method 04: C(I,J) += A ; using S
//--------------------------------------------------------------------------
// Time: Close to Optimal. Every entry in A must be visited, and the
// corresponding entry in S must then be found. Time for this phase is
// Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S))
// time. This method simply traverses all of A+S (like GB_add for
// computing A+S), the same as Method 02. Time taken is O(nnz(A)+nnz(S)).
// The only difference is that the traversal of A+S can terminate if A is
// exhausted. Entries in S but not A do not actually require any work
// (unlike Method 02, which must visit all entries in A+S).
// Method 02 and Method 04 are somewhat similar. They differ on how C is
// modified when the entry is present in S but not A.
// TODO: phase2 of Method 02 and 04 are identical and could be
// done in a single function.
// Compare with Method 16, which computes C(I,J)<!M> += A, using S.
//--------------------------------------------------------------------------
// Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
//--------------------------------------------------------------------------
if (A_is_bitmap)
{
// all of IxJ must be examined
GB_SUBASSIGN_IXJ_SLICE ;
}
else
{
// traverse all A+S
GB_SUBASSIGN_TWO_SLICE (A, S) ;
}
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
if (A_is_bitmap)
{
//----------------------------------------------------------------------
// phase1: A is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iA_start:iA_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
int64_t pA_start = j * Avlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
//--------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t pA = pA_start + iA ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
bool Afound = Ab [pA] ;
if (Sfound && !Afound)
{
// ----[C . 1] or [X . 1]-------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( C ): no change, with accum
// [X . 1]: action: ( X ): still a zombie
GB_NEXT (S) ;
}
else if (!Sfound && Afound)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
task_pending++ ;
}
else if (Sfound && Afound)
{
// ----[C A 1] or [X A 1]-------------------------------
// both S (i,j) and A (i,j) present
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_withaccum_C_A_1_matrix ;
GB_NEXT (S) ;
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase1: A is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get A(:,j) and S(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
// int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iA = GBI (Ai, pA, Avlen) ;
if (iS < iA)
{
// ----[C . 1] or [X . 1]-------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( C ): no change, with accum
// [X . 1]: action: ( X ): still a zombie
GB_NEXT (S) ;
}
else if (iA < iS)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
task_pending++ ;
GB_NEXT (A) ;
}
else
{
// ----[C A 1] or [X A 1]-------------------------------
// both S (i,j) and A (i,j) present
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_withaccum_C_A_1_matrix ;
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// ignore the remainder of S (:,j)
// List A (:,j) has entries. List S (:,j) exhausted.
task_pending += (pA_end - pA) ;
}
GB_PHASE1_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
if (A_is_bitmap)
{
//----------------------------------------------------------------------
// phase2: A is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iA_start:iA_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
int64_t pA_start = j * Avlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t pA = pA_start + iA ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
bool Afound = Ab [pA] ;
if (!Sfound && Afound)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (Ax +(pA*asize)) ;
GB_NEXT (A) ;
}
else if (Sfound)
{
// S (i,j) present
GB_NEXT (S) ;
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase2: A is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get A(:,j) and S(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iA = GBI (Ai, pA, Avlen) ;
if (iS < iA)
{
GB_NEXT (S) ;
}
else if (iA < iS)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (Ax +(pA*asize)) ;
GB_NEXT (A) ;
}
else
{
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// ignore the remainder of S (:,j)
// while list A (:,j) has entries. List S (:,j) exhausted.
while (pA < pA_end)
{
// ----[. A 1]----------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iA = GBI (Ai, pA, Avlen) ;
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (Ax +(pA*asize)) ;
GB_NEXT (A) ;
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is normalized in place as a side effect. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that x->tv_usec - y->tv_usec lies in [0, 1000000). */
    if (x->tv_usec < y->tv_usec)
    {
        const int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        const int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* With y normalized, the component-wise difference is the answer
       and tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
/*
 * Driver for a time-tiled, 8th-order 3-D wave-equation stencil whose loop
 * nest was generated by CLooG/Pluto.
 *
 * Usage: prog Nx Ny Nz Nt
 *   Nx, Ny, Nz - interior problem size (8 is added for ghost layers)
 *   Nt         - number of time steps
 *
 * Relies on macros defined elsewhere in this translation unit
 * (ceild, floord, min, max, MIN, TESTS, PRINT_RESULTS, LIKWID_*)
 * and on timeval_subtract() defined above.
 *
 * Returns 0 on success, 1 on bad usage.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;
    /* BUG FIX: the original read the sizes only when enough arguments were
     * present and otherwise used Nx..Nt uninitialized (undefined behavior).
     * Require all four arguments up front. */
    if (argc <= 4) {
        fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
        return 1;
    }
    Nx = atoi(argv[1])+8;   /* +8: ghost layers for the order-8 stencil */
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
    Nt = atoi(argv[4]);
    /* A[2][Nz][Ny][Nx]: two time planes of the field (ping-pong via t%2).
     * roc2[Nz][Ny][Nx]: per-cell coefficient.
     * BUG FIX: the original malloc'ed a one-element placeholder for roc2
     * and immediately overwrote the pointer with the real allocation,
     * leaking the placeholder. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 4;
    tile_size[3] = 256;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    // initialize variables
    /* NOTE(review): plane/row/column 0 is deliberately left untouched here,
     * presumably as stencil padding -- confirm against the ghost layout.
     * BUG FIX: A[1] is read by the stencil on the very first time step, so
     * it is now zero-initialized instead of holding indeterminate values. */
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    /* 8th-order central-difference coefficients */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
        int t1, t2, t3, t4, t5, t6, t7, t8;
        int lb, ub, lbp, ubp, lb2, ub2;
        register int lbv, ubv;
        /* Start of CLooG code */
        if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
            for (t1=-1;t1<=Nt-1;t1++) {
                lbp=ceild(t1+1,2);
                ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
                for (t2=lbp;t2<=ubp;t2++) {
                    for (t3=max(max(1,ceild(8*t2-Nz+9,4)),t1+1);t3<=min(floord(4*Nt+Ny-9,4),floord(4*t1+Ny-1,4));t3++) {
                        for (t4=max(max(ceild(t1-62,64),ceild(8*t2-Nz-243,256)),ceild(4*t3-Ny-243,256));t4<=min(min(floord(4*Nt+Nx-9,256),floord(4*t1+Nx-1,256)),floord(4*t3+Nx-9,256));t4++) {
                            for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(256*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),t3-1),64*t4+62);t5++) {
                                for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
                                    for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
                                        lbv=max(256*t4,4*t5+4);
                                        ubv=min(256*t4+255,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                                        for (t8=lbv;t8<=ubv;t8++) {
                                            A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        /* End of CLooG code */
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    /* BUG FIX: the top-level containers were never released */
    free(A);
    free(tile_size);
    return 0;
}
|
lbm.c | /* $Id: lbm.c,v 1.1 2008/03/04 17:30:02 stratton Exp $ */
/*############################################################################*/
#include "lbm.h"
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#if !defined(SPEC_CPU)
#ifdef _OPENMP
#include <omp.h>
#endif
#endif
/*############################################################################*/
#define DFL1 (1.0/ 3.0)
#define DFL2 (1.0/18.0)
#define DFL3 (1.0/36.0)
/*############################################################################*/
/*
 * Allocate one lattice grid with a safety margin of 2*SIZE_X*SIZE_Y cell
 * records (N_CELL_ENTRIES floats each) before and after the grid proper.
 * On success *ptr is advanced past the leading margin so index 0 addresses
 * the first real cell; on allocation failure the program exits.
 */
void LBM_allocateGrid( float** ptr ) {
	const size_t margin = 2*SIZE_X*SIZE_Y*N_CELL_ENTRIES,
	             size   = sizeof( LBM_Grid ) + 2*margin*sizeof( float );

	*ptr = malloc( size );
	if( ! *ptr ) {
		/* BUG FIX: the error was printed to stdout; diagnostics belong
		 * on stderr so they are not lost when stdout is redirected. */
		fprintf( stderr, "LBM_allocateGrid: could not allocate %.1f MByte\n",
		         size / (1024.0*1024.0) );
		exit( 1 );
	}
#if !defined(SPEC_CPU)
	printf( "LBM_allocateGrid: allocated %.1f MByte\n",
	        size / (1024.0*1024.0) );
#endif
	/* skip the leading margin; LBM_freeGrid undoes this shift */
	*ptr += margin;
}
/*############################################################################*/
/* Release a grid obtained from LBM_allocateGrid.  The stored pointer was
 * advanced past the leading margin at allocation time, so step back to the
 * true base address before freeing, then null the caller's pointer. */
void LBM_freeGrid( float** ptr ) {
	const size_t margin = 2*SIZE_X*SIZE_Y*N_CELL_ENTRIES;
	float* base = *ptr - margin;

	free( base );
	*ptr = NULL;
}
/*############################################################################*/
/* Initialize every cell of the grid -- including the two extra Z slices on
 * each side of the sweep range -- to the rest-state distribution weights:
 * DFL1 (1/3) for the center, DFL2 (1/18) for the 6 axis directions and
 * DFL3 (1/36) for the 12 diagonal directions, then clear all cell flags.
 * SWEEP_START/SWEEP_END and LOCAL are macros from lbm.h; SWEEP_START
 * expands to the loop header that the OpenMP pragma parallelizes. */
void LBM_initializeGrid( LBM_Grid grid ) {
SWEEP_VAR
/*voption indep*/
#if !defined(SPEC_CPU)
#ifdef _OPENMP
#pragma omp parallel for
#endif
#endif
SWEEP_START( 0, 0, -2, 0, 0, SIZE_Z+2 )
LOCAL( grid, C ) = DFL1;
LOCAL( grid, N ) = DFL2;
LOCAL( grid, S ) = DFL2;
LOCAL( grid, E ) = DFL2;
LOCAL( grid, W ) = DFL2;
LOCAL( grid, T ) = DFL2;
LOCAL( grid, B ) = DFL2;
LOCAL( grid, NE ) = DFL3;
LOCAL( grid, NW ) = DFL3;
LOCAL( grid, SE ) = DFL3;
LOCAL( grid, SW ) = DFL3;
LOCAL( grid, NT ) = DFL3;
LOCAL( grid, NB ) = DFL3;
LOCAL( grid, ST ) = DFL3;
LOCAL( grid, SB ) = DFL3;
LOCAL( grid, ET ) = DFL3;
LOCAL( grid, EB ) = DFL3;
LOCAL( grid, WT ) = DFL3;
LOCAL( grid, WB ) = DFL3;
/* reset OBSTACLE/ACCEL/IN_OUT_FLOW markers for the cell */
CLEAR_ALL_FLAGS_SWEEP( grid );
SWEEP_END
}
/*############################################################################*/
/* Exchange the two grid pointers (ping-pong buffering between the source
 * and destination grids of successive time steps). */
void LBM_swapGrids( LBM_GridPtr* grid1, LBM_GridPtr* grid2 ) {
	LBM_GridPtr tmp = *grid1;

	*grid1 = *grid2;
	*grid2 = tmp;
}
/*############################################################################*/
/*
 * Mark OBSTACLE cells from an ASCII layout file: one character per cell,
 * row by row, slice by slice; any character other than '.' is an obstacle.
 * A separator character (presumably a newline -- confirm against the input
 * format) follows each row and each z-slice.
 */
void LBM_loadObstacleFile( LBM_Grid grid, const char* filename ) {
	int x, y, z;

	FILE* file = fopen( filename, "rb" );
	/* BUG FIX: the original passed an unchecked FILE* straight to fgetc,
	 * crashing (UB) when the file could not be opened. */
	if( file == NULL ) {
		fprintf( stderr, "LBM_loadObstacleFile: cannot open '%s'\n",
		         filename );
		exit( 1 );
	}

	for( z = 0; z < SIZE_Z; z++ ) {
		for( y = 0; y < SIZE_Y; y++ ) {
			for( x = 0; x < SIZE_X; x++ ) {
				if( fgetc( file ) != '.' ) SET_FLAG( grid, x, y, z, OBSTACLE );
			}
			fgetc( file );  /* skip row separator */
		}
		fgetc( file );      /* skip slice separator */
	}

	fclose( file );
}
/*############################################################################*/
/* Set up the lid-driven-cavity test case: every cell on the outer box of
 * the domain becomes an OBSTACLE; interior cells of the z = 1 and
 * z = SIZE_Z-2 planes (away from the walls) become ACCEL cells that drive
 * the flow.  The two extra z slices on each side are included. */
void LBM_initializeSpecialCellsForLDC( LBM_Grid grid ) {
	int x, y, z;

	/*voption indep*/
#if !defined(SPEC_CPU)
#ifdef _OPENMP
#pragma omp parallel for private( x, y )
#endif
#endif
	for( z = -2; z < SIZE_Z+2; z++ ) {
		for( y = 0; y < SIZE_Y; y++ ) {
			for( x = 0; x < SIZE_X; x++ ) {
				const int onBoundary = x == 0 || x == SIZE_X-1 ||
				                       y == 0 || y == SIZE_Y-1 ||
				                       z == 0 || z == SIZE_Z-1;

				if( onBoundary ) {
					SET_FLAG( grid, x, y, z, OBSTACLE );
				}
				else if( (z == 1 || z == SIZE_Z-2) &&
				         x > 1 && x < SIZE_X-2 &&
				         y > 1 && y < SIZE_Y-2 ) {
					SET_FLAG( grid, x, y, z, ACCEL );
				}
			}
		}
	}
}
/*############################################################################*/
/*
 * Set up the channel-flow test case: the x/y walls of the domain become
 * OBSTACLE cells, and every non-obstacle cell of the first and last
 * z-slices is marked IN_OUT_FLOW.
 */
void LBM_initializeSpecialCellsForChannel( LBM_Grid grid ) {
	int x, y, z;

	/*voption indep*/
#if !defined(SPEC_CPU)
#ifdef _OPENMP
#pragma omp parallel for private( x, y )
#endif
#endif
	for( z = -2; z < SIZE_Z+2; z++ ) {
		for( y = 0; y < SIZE_Y; y++ ) {
			for( x = 0; x < SIZE_X; x++ ) {
				if( x == 0 || x == SIZE_X-1 ||
				    y == 0 || y == SIZE_Y-1 ) {
					SET_FLAG( grid, x, y, z, OBSTACLE );
				}

				/* BUG FIX: in the original this test was nested inside the
				 * branch that had just set OBSTACLE on the very same cell,
				 * so !TEST_FLAG(...OBSTACLE) was always false and
				 * IN_OUT_FLOW was never set.  The SPEC 470.lbm reference
				 * applies the inflow/outflow test to every cell of the
				 * z = 0 and z = SIZE_Z-1 slices, obstacle walls excluded. */
				if( (z == 0 || z == SIZE_Z-1) &&
				    ! TEST_FLAG( grid, x, y, z, OBSTACLE ))
					SET_FLAG( grid, x, y, z, IN_OUT_FLOW );
			}
		}
	}
}
/*############################################################################*/
/* One LBM time step: for every cell of srcGrid, either bounce the 19
 * distributions back (obstacle cells) or compute the BGK stream-and-collide
 * update, writing the result into dstGrid.  SWEEP_START/SWEEP_END and the
 * SRC_x/DST_x accessors are macros from lbm.h; the SWEEP macros expand to
 * the per-cell loop, so `continue` below advances to the next cell. */
void LBM_performStreamCollide( LBM_Grid srcGrid, LBM_Grid dstGrid ) {
SWEEP_VAR
float ux, uy, uz, u2, rho;
/*voption indep*/
#if !defined(SPEC_CPU)
#ifdef _OPENMP
#pragma omp parallel for private( ux, uy, uz, u2, rho )
#endif
#endif
SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )
/* obstacle: bounce-back -- each distribution is reflected into the
 * opposite direction (N<->S, E<->W, T<->B, and diagonals likewise) */
if( TEST_FLAG_SWEEP( srcGrid, OBSTACLE )) {
DST_C ( dstGrid ) = SRC_C ( srcGrid );
DST_S ( dstGrid ) = SRC_N ( srcGrid );
DST_N ( dstGrid ) = SRC_S ( srcGrid );
DST_W ( dstGrid ) = SRC_E ( srcGrid );
DST_E ( dstGrid ) = SRC_W ( srcGrid );
DST_B ( dstGrid ) = SRC_T ( srcGrid );
DST_T ( dstGrid ) = SRC_B ( srcGrid );
DST_SW( dstGrid ) = SRC_NE( srcGrid );
DST_SE( dstGrid ) = SRC_NW( srcGrid );
DST_NW( dstGrid ) = SRC_SE( srcGrid );
DST_NE( dstGrid ) = SRC_SW( srcGrid );
DST_SB( dstGrid ) = SRC_NT( srcGrid );
DST_ST( dstGrid ) = SRC_NB( srcGrid );
DST_NB( dstGrid ) = SRC_ST( srcGrid );
DST_NT( dstGrid ) = SRC_SB( srcGrid );
DST_WB( dstGrid ) = SRC_ET( srcGrid );
DST_WT( dstGrid ) = SRC_EB( srcGrid );
DST_EB( dstGrid ) = SRC_WT( srcGrid );
DST_ET( dstGrid ) = SRC_WB( srcGrid );
continue;
}
/* macroscopic density: sum of all 19 distributions */
rho = + SRC_C ( srcGrid ) + SRC_N ( srcGrid )
+ SRC_S ( srcGrid ) + SRC_E ( srcGrid )
+ SRC_W ( srcGrid ) + SRC_T ( srcGrid )
+ SRC_B ( srcGrid ) + SRC_NE( srcGrid )
+ SRC_NW( srcGrid ) + SRC_SE( srcGrid )
+ SRC_SW( srcGrid ) + SRC_NT( srcGrid )
+ SRC_NB( srcGrid ) + SRC_ST( srcGrid )
+ SRC_SB( srcGrid ) + SRC_ET( srcGrid )
+ SRC_EB( srcGrid ) + SRC_WT( srcGrid )
+ SRC_WB( srcGrid );
/* momentum components: signed sums over the directions with an
 * x/y/z component, later normalized by rho to give velocity */
ux = + SRC_E ( srcGrid ) - SRC_W ( srcGrid )
+ SRC_NE( srcGrid ) - SRC_NW( srcGrid )
+ SRC_SE( srcGrid ) - SRC_SW( srcGrid )
+ SRC_ET( srcGrid ) + SRC_EB( srcGrid )
- SRC_WT( srcGrid ) - SRC_WB( srcGrid );
uy = + SRC_N ( srcGrid ) - SRC_S ( srcGrid )
+ SRC_NE( srcGrid ) + SRC_NW( srcGrid )
- SRC_SE( srcGrid ) - SRC_SW( srcGrid )
+ SRC_NT( srcGrid ) + SRC_NB( srcGrid )
- SRC_ST( srcGrid ) - SRC_SB( srcGrid );
uz = + SRC_T ( srcGrid ) - SRC_B ( srcGrid )
+ SRC_NT( srcGrid ) - SRC_NB( srcGrid )
+ SRC_ST( srcGrid ) - SRC_SB( srcGrid )
+ SRC_ET( srcGrid ) - SRC_EB( srcGrid )
+ SRC_WT( srcGrid ) - SRC_WB( srcGrid );
ux /= rho;
uy /= rho;
uz /= rho;
/* accelerated cells are forced to a fixed driving velocity */
if( TEST_FLAG_SWEEP( srcGrid, ACCEL )) {
ux = 0.005f;
uy = 0.002f;
uz = 0.000f;
}
u2 = 1.5f * (ux*ux + uy*uy + uz*uz);
/* BGK collision: relax each distribution toward its equilibrium with
 * rate OMEGA; DFL1/DFL2/DFL3 are the direction weights */
DST_C ( dstGrid ) = (1.0f-OMEGA)*SRC_C ( srcGrid ) + DFL1*OMEGA*rho*(1.0f - u2);
DST_N ( dstGrid ) = (1.0f-OMEGA)*SRC_N ( srcGrid ) + DFL2*OMEGA*rho*(1.0f + uy*(4.5f*uy + 3.0f) - u2);
DST_S ( dstGrid ) = (1.0f-OMEGA)*SRC_S ( srcGrid ) + DFL2*OMEGA*rho*(1.0f + uy*(4.5f*uy - 3.0f) - u2);
DST_E ( dstGrid ) = (1.0f-OMEGA)*SRC_E ( srcGrid ) + DFL2*OMEGA*rho*(1.0f + ux*(4.5f*ux + 3.0f) - u2);
DST_W ( dstGrid ) = (1.0f-OMEGA)*SRC_W ( srcGrid ) + DFL2*OMEGA*rho*(1.0f + ux*(4.5f*ux - 3.0f) - u2);
DST_T ( dstGrid ) = (1.0f-OMEGA)*SRC_T ( srcGrid ) + DFL2*OMEGA*rho*(1.0f + uz*(4.5f*uz + 3.0f) - u2);
DST_B ( dstGrid ) = (1.0f-OMEGA)*SRC_B ( srcGrid ) + DFL2*OMEGA*rho*(1.0f + uz*(4.5f*uz - 3.0f) - u2);
DST_NE( dstGrid ) = (1.0f-OMEGA)*SRC_NE( srcGrid ) + DFL3*OMEGA*rho*(1.0f + (+ux+uy)*(4.5f*(+ux+uy) + 3.0f) - u2);
DST_NW( dstGrid ) = (1.0f-OMEGA)*SRC_NW( srcGrid ) + DFL3*OMEGA*rho*(1.0f + (-ux+uy)*(4.5f*(-ux+uy) + 3.0f) - u2);
DST_SE( dstGrid ) = (1.0f-OMEGA)*SRC_SE( srcGrid ) + DFL3*OMEGA*rho*(1.0f + (+ux-uy)*(4.5f*(+ux-uy) + 3.0f) - u2);
DST_SW( dstGrid ) = (1.0f-OMEGA)*SRC_SW( srcGrid ) + DFL3*OMEGA*rho*(1.0f + (-ux-uy)*(4.5f*(-ux-uy) + 3.0f) - u2);
DST_NT( dstGrid ) = (1.0f-OMEGA)*SRC_NT( srcGrid ) + DFL3*OMEGA*rho*(1.0f + (+uy+uz)*(4.5f*(+uy+uz) + 3.0f) - u2);
DST_NB( dstGrid ) = (1.0f-OMEGA)*SRC_NB( srcGrid ) + DFL3*OMEGA*rho*(1.0f + (+uy-uz)*(4.5f*(+uy-uz) + 3.0f) - u2);
DST_ST( dstGrid ) = (1.0f-OMEGA)*SRC_ST( srcGrid ) + DFL3*OMEGA*rho*(1.0f + (-uy+uz)*(4.5f*(-uy+uz) + 3.0f) - u2);
DST_SB( dstGrid ) = (1.0f-OMEGA)*SRC_SB( srcGrid ) + DFL3*OMEGA*rho*(1.0f + (-uy-uz)*(4.5f*(-uy-uz) + 3.0f) - u2);
DST_ET( dstGrid ) = (1.0f-OMEGA)*SRC_ET( srcGrid ) + DFL3*OMEGA*rho*(1.0f + (+ux+uz)*(4.5f*(+ux+uz) + 3.0f) - u2);
DST_EB( dstGrid ) = (1.0f-OMEGA)*SRC_EB( srcGrid ) + DFL3*OMEGA*rho*(1.0f + (+ux-uz)*(4.5f*(+ux-uz) + 3.0f) - u2);
DST_WT( dstGrid ) = (1.0f-OMEGA)*SRC_WT( srcGrid ) + DFL3*OMEGA*rho*(1.0f + (-ux+uz)*(4.5f*(-ux+uz) + 3.0f) - u2);
DST_WB( dstGrid ) = (1.0f-OMEGA)*SRC_WB( srcGrid ) + DFL3*OMEGA*rho*(1.0f + (-ux-uz)*(4.5f*(-ux-uz) + 3.0f) - u2);
SWEEP_END
}
/*############################################################################*/
/* Apply the in/out-flow boundary conditions of the channel test case,
 * modifying srcGrid in place.  Inflow (z = 0 slice): density is linearly
 * extrapolated from the two neighboring slices and a fixed parabolic
 * velocity profile in z is imposed.  Outflow (z = SIZE_Z-1 slice):
 * velocity is linearly extrapolated from the two preceding slices and the
 * density is pinned to 1.  GRID_ENTRY_SWEEP( grid, dx, dy, dz, DIR ) reads
 * direction DIR of the cell offset by (dx,dy,dz) from the sweep position. */
void LBM_handleInOutFlow( LBM_Grid srcGrid ) {
float ux , uy , uz , rho ,
ux1, uy1, uz1, rho1,
ux2, uy2, uz2, rho2,
u2, px, py;
SWEEP_VAR
/* inflow */
/*voption indep*/
#if !defined(SPEC_CPU)
#ifdef _OPENMP
#pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \
ux2, uy2, uz2, rho2, u2, px, py )
#endif
#endif
SWEEP_START( 0, 0, 0, 0, 0, 1 )
/* densities of the two slices just inside the domain (z = 1 and z = 2) */
rho1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, N )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, E )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, T )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NE )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, SE )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NT )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, ST )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, ET )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, WT )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, WB );
rho2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, N )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, E )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, T )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NE )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, SE )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NT )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, ST )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, ET )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, WT )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, WB );
/* linear extrapolation of density onto the inflow slice */
rho = 2.0*rho1 - rho2;
/* px, py in [-1, 1]: normalized cross-section coordinates */
px = (SWEEP_X / (0.5*(SIZE_X-1))) - 1.0;
py = (SWEEP_Y / (0.5*(SIZE_Y-1))) - 1.0;
ux = 0.00;
uy = 0.00;
/* parabolic inflow profile along z, zero at the walls */
uz = 0.01 * (1.0-px*px) * (1.0-py*py);
u2 = 1.5 * (ux*ux + uy*uy + uz*uz);
/* write the equilibrium distribution for (rho, ux, uy, uz) */
LOCAL( srcGrid, C ) = DFL1*rho*(1.0 - u2);
LOCAL( srcGrid, N ) = DFL2*rho*(1.0 + uy*(4.5*uy + 3.0) - u2);
LOCAL( srcGrid, S ) = DFL2*rho*(1.0 + uy*(4.5*uy - 3.0) - u2);
LOCAL( srcGrid, E ) = DFL2*rho*(1.0 + ux*(4.5*ux + 3.0) - u2);
LOCAL( srcGrid, W ) = DFL2*rho*(1.0 + ux*(4.5*ux - 3.0) - u2);
LOCAL( srcGrid, T ) = DFL2*rho*(1.0 + uz*(4.5*uz + 3.0) - u2);
LOCAL( srcGrid, B ) = DFL2*rho*(1.0 + uz*(4.5*uz - 3.0) - u2);
LOCAL( srcGrid, NE) = DFL3*rho*(1.0 + (+ux+uy)*(4.5*(+ux+uy) + 3.0) - u2);
LOCAL( srcGrid, NW) = DFL3*rho*(1.0 + (-ux+uy)*(4.5*(-ux+uy) + 3.0) - u2);
LOCAL( srcGrid, SE) = DFL3*rho*(1.0 + (+ux-uy)*(4.5*(+ux-uy) + 3.0) - u2);
LOCAL( srcGrid, SW) = DFL3*rho*(1.0 + (-ux-uy)*(4.5*(-ux-uy) + 3.0) - u2);
LOCAL( srcGrid, NT) = DFL3*rho*(1.0 + (+uy+uz)*(4.5*(+uy+uz) + 3.0) - u2);
LOCAL( srcGrid, NB) = DFL3*rho*(1.0 + (+uy-uz)*(4.5*(+uy-uz) + 3.0) - u2);
LOCAL( srcGrid, ST) = DFL3*rho*(1.0 + (-uy+uz)*(4.5*(-uy+uz) + 3.0) - u2);
LOCAL( srcGrid, SB) = DFL3*rho*(1.0 + (-uy-uz)*(4.5*(-uy-uz) + 3.0) - u2);
LOCAL( srcGrid, ET) = DFL3*rho*(1.0 + (+ux+uz)*(4.5*(+ux+uz) + 3.0) - u2);
LOCAL( srcGrid, EB) = DFL3*rho*(1.0 + (+ux-uz)*(4.5*(+ux-uz) + 3.0) - u2);
LOCAL( srcGrid, WT) = DFL3*rho*(1.0 + (-ux+uz)*(4.5*(-ux+uz) + 3.0) - u2);
LOCAL( srcGrid, WB) = DFL3*rho*(1.0 + (-ux-uz)*(4.5*(-ux-uz) + 3.0) - u2);
SWEEP_END
/* outflow */
/*voption indep*/
#if !defined(SPEC_CPU)
#ifdef _OPENMP
#pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \
ux2, uy2, uz2, rho2, u2, px, py )
#endif
#endif
SWEEP_START( 0, 0, SIZE_Z-1, 0, 0, SIZE_Z )
/* density and velocity of the slice just inside the outlet (offset -1) */
rho1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, N )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, E )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, T )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB );
ux1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, E ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, W )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB )
- GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB );
uy1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, N ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, S )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW )
- GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB )
- GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB );
uz1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, T ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, B )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB );
ux1 /= rho1;
uy1 /= rho1;
uz1 /= rho1;
/* same quantities one slice further in (offset -2) */
rho2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, N )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, E )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, T )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB );
ux2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, E ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, W )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB )
- GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB );
uy2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, N ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, S )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW )
- GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB )
- GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB );
uz2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, T ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, B )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB )
+ GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB );
ux2 /= rho2;
uy2 /= rho2;
uz2 /= rho2;
/* fixed outlet density; velocity linearly extrapolated outward */
rho = 1.0;
ux = 2*ux1 - ux2;
uy = 2*uy1 - uy2;
uz = 2*uz1 - uz2;
u2 = 1.5 * (ux*ux + uy*uy + uz*uz);
/* write the equilibrium distribution for (rho, ux, uy, uz) */
LOCAL( srcGrid, C ) = DFL1*rho*(1.0 - u2);
LOCAL( srcGrid, N ) = DFL2*rho*(1.0 + uy*(4.5*uy + 3.0) - u2);
LOCAL( srcGrid, S ) = DFL2*rho*(1.0 + uy*(4.5*uy - 3.0) - u2);
LOCAL( srcGrid, E ) = DFL2*rho*(1.0 + ux*(4.5*ux + 3.0) - u2);
LOCAL( srcGrid, W ) = DFL2*rho*(1.0 + ux*(4.5*ux - 3.0) - u2);
LOCAL( srcGrid, T ) = DFL2*rho*(1.0 + uz*(4.5*uz + 3.0) - u2);
LOCAL( srcGrid, B ) = DFL2*rho*(1.0 + uz*(4.5*uz - 3.0) - u2);
LOCAL( srcGrid, NE) = DFL3*rho*(1.0 + (+ux+uy)*(4.5*(+ux+uy) + 3.0) - u2);
LOCAL( srcGrid, NW) = DFL3*rho*(1.0 + (-ux+uy)*(4.5*(-ux+uy) + 3.0) - u2);
LOCAL( srcGrid, SE) = DFL3*rho*(1.0 + (+ux-uy)*(4.5*(+ux-uy) + 3.0) - u2);
LOCAL( srcGrid, SW) = DFL3*rho*(1.0 + (-ux-uy)*(4.5*(-ux-uy) + 3.0) - u2);
LOCAL( srcGrid, NT) = DFL3*rho*(1.0 + (+uy+uz)*(4.5*(+uy+uz) + 3.0) - u2);
LOCAL( srcGrid, NB) = DFL3*rho*(1.0 + (+uy-uz)*(4.5*(+uy-uz) + 3.0) - u2);
LOCAL( srcGrid, ST) = DFL3*rho*(1.0 + (-uy+uz)*(4.5*(-uy+uz) + 3.0) - u2);
LOCAL( srcGrid, SB) = DFL3*rho*(1.0 + (-uy-uz)*(4.5*(-uy-uz) + 3.0) - u2);
LOCAL( srcGrid, ET) = DFL3*rho*(1.0 + (+ux+uz)*(4.5*(+ux+uz) + 3.0) - u2);
LOCAL( srcGrid, EB) = DFL3*rho*(1.0 + (+ux-uz)*(4.5*(+ux-uz) + 3.0) - u2);
LOCAL( srcGrid, WT) = DFL3*rho*(1.0 + (-ux+uz)*(4.5*(-ux+uz) + 3.0) - u2);
LOCAL( srcGrid, WB) = DFL3*rho*(1.0 + (-ux-uz)*(4.5*(-ux-uz) + 3.0) - u2);
SWEEP_END
}
/*############################################################################*/
/* Scan the whole grid and print summary statistics: cell counts by type
 * (obstacle / accelerated / fluid), min/max/total density, and min/max
 * velocity magnitude over the non-obstacle cells. */
void LBM_showGridStatistics( LBM_Grid grid ) {
int nObstacleCells = 0,
nAccelCells = 0,
nFluidCells = 0;
float ux, uy, uz;
float minU2 = 1e+30, maxU2 = -1e+30, u2;
float minRho = 1e+30, maxRho = -1e+30, rho;
float mass = 0;
SWEEP_VAR
SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )
/* per-cell density: sum of all 19 distributions */
rho = + LOCAL( grid, C ) + LOCAL( grid, N )
+ LOCAL( grid, S ) + LOCAL( grid, E )
+ LOCAL( grid, W ) + LOCAL( grid, T )
+ LOCAL( grid, B ) + LOCAL( grid, NE )
+ LOCAL( grid, NW ) + LOCAL( grid, SE )
+ LOCAL( grid, SW ) + LOCAL( grid, NT )
+ LOCAL( grid, NB ) + LOCAL( grid, ST )
+ LOCAL( grid, SB ) + LOCAL( grid, ET )
+ LOCAL( grid, EB ) + LOCAL( grid, WT )
+ LOCAL( grid, WB );
if( rho < minRho ) minRho = rho;
if( rho > maxRho ) maxRho = rho;
mass += rho;
if( TEST_FLAG_SWEEP( grid, OBSTACLE )) {
nObstacleCells++;
}
else {
if( TEST_FLAG_SWEEP( grid, ACCEL ))
nAccelCells++;
else
nFluidCells++;
/* momentum components (velocity * rho); squared speed is
 * normalized by rho*rho below */
ux = + LOCAL( grid, E ) - LOCAL( grid, W )
+ LOCAL( grid, NE ) - LOCAL( grid, NW )
+ LOCAL( grid, SE ) - LOCAL( grid, SW )
+ LOCAL( grid, ET ) + LOCAL( grid, EB )
- LOCAL( grid, WT ) - LOCAL( grid, WB );
uy = + LOCAL( grid, N ) - LOCAL( grid, S )
+ LOCAL( grid, NE ) + LOCAL( grid, NW )
- LOCAL( grid, SE ) - LOCAL( grid, SW )
+ LOCAL( grid, NT ) + LOCAL( grid, NB )
- LOCAL( grid, ST ) - LOCAL( grid, SB );
uz = + LOCAL( grid, T ) - LOCAL( grid, B )
+ LOCAL( grid, NT ) - LOCAL( grid, NB )
+ LOCAL( grid, ST ) - LOCAL( grid, SB )
+ LOCAL( grid, ET ) - LOCAL( grid, EB )
+ LOCAL( grid, WT ) - LOCAL( grid, WB );
u2 = (ux*ux + uy*uy + uz*uz) / (rho*rho);
if( u2 < minU2 ) minU2 = u2;
if( u2 > maxU2 ) maxU2 = u2;
}
SWEEP_END
printf( "LBM_showGridStatistics:\n"
"\tnObstacleCells: %7i nAccelCells: %7i nFluidCells: %7i\n"
"\tminRho: %8.4f maxRho: %8.4f mass: %e\n"
"\tminU: %e maxU: %e\n\n",
nObstacleCells, nAccelCells, nFluidCells,
minRho, maxRho, mass,
sqrt( minU2 ), sqrt( maxU2 ) );
}
/*############################################################################*/
/* Write one OUTPUT_PRECISION value to `file` in little-endian byte order.
 * On a little-endian host the value is written directly; on a big-endian
 * host its bytes are reversed into a temporary buffer first. */
static void storeValue( FILE* file, OUTPUT_PRECISION* v ) {
	const int endianProbe = 1;
	const int isLittleEndian = *((const unsigned char*) &endianProbe) != 0;

	if( isLittleEndian ) {
		fwrite( v, sizeof( OUTPUT_PRECISION ), 1, file );
	}
	else {
		const char* src = (const char*) v;
		char swapped[sizeof( OUTPUT_PRECISION )];
		size_t i;

		for( i = 0; i < sizeof( OUTPUT_PRECISION ); i++ )
			swapped[i] = src[sizeof( OUTPUT_PRECISION ) - i - 1];
		fwrite( swapped, sizeof( OUTPUT_PRECISION ), 1, file );
	}
}
/*############################################################################*/
/*
 * Read one OUTPUT_PRECISION value stored in little-endian byte order from
 * `file` into *v, byte-swapping on big-endian hosts.
 * BUG FIX: the original ignored the fread return value, silently leaving
 * *v with stale/indeterminate bytes on a short read; a truncated input now
 * aborts with a diagnostic.
 */
static void loadValue( FILE* file, OUTPUT_PRECISION* v ) {
	const int litteBigEndianTest = 1;
	if( (*((unsigned char*) &litteBigEndianTest)) == 0 ) {		/* big endian */
		char* vPtr = (char*) v;
		char buffer[sizeof( OUTPUT_PRECISION )];
		size_t i;

		if( fread( buffer, sizeof( OUTPUT_PRECISION ), 1, file ) != 1 ) {
			fprintf( stderr, "loadValue: unexpected end of file\n" );
			exit( 1 );
		}
		for (i = 0; i < sizeof( OUTPUT_PRECISION ); i++)
			vPtr[i] = buffer[sizeof( OUTPUT_PRECISION ) - i - 1];
	}
	else {		/* little endian */
		if( fread( v, sizeof( OUTPUT_PRECISION ), 1, file ) != 1 ) {
			fprintf( stderr, "loadValue: unexpected end of file\n" );
			exit( 1 );
		}
	}
}
/*############################################################################*/
/*
 * Write the velocity field (ux, uy, uz per cell, derived from the 19
 * distributions) to `filename` -- little-endian binary via storeValue when
 * `binary` is nonzero, otherwise whitespace-separated text.
 */
void LBM_storeVelocityField( LBM_Grid grid, const char* filename,
                             const int binary ) {
	int x, y, z;
	OUTPUT_PRECISION rho, ux, uy, uz;

	FILE* file = fopen( filename, (binary ? "wb" : "w") );
	/* BUG FIX: the original wrote through an unchecked FILE* */
	if( file == NULL ) {
		fprintf( stderr, "LBM_storeVelocityField: cannot open '%s'\n",
		         filename );
		exit( 1 );
	}

	for( z = 0; z < SIZE_Z; z++ ) {
		for( y = 0; y < SIZE_Y; y++ ) {
			for( x = 0; x < SIZE_X; x++ ) {
				/* density: sum of all 19 distributions */
				rho = + GRID_ENTRY( grid, x, y, z, C  ) + GRID_ENTRY( grid, x, y, z, N  )
				      + GRID_ENTRY( grid, x, y, z, S  ) + GRID_ENTRY( grid, x, y, z, E  )
				      + GRID_ENTRY( grid, x, y, z, W  ) + GRID_ENTRY( grid, x, y, z, T  )
				      + GRID_ENTRY( grid, x, y, z, B  ) + GRID_ENTRY( grid, x, y, z, NE )
				      + GRID_ENTRY( grid, x, y, z, NW ) + GRID_ENTRY( grid, x, y, z, SE )
				      + GRID_ENTRY( grid, x, y, z, SW ) + GRID_ENTRY( grid, x, y, z, NT )
				      + GRID_ENTRY( grid, x, y, z, NB ) + GRID_ENTRY( grid, x, y, z, ST )
				      + GRID_ENTRY( grid, x, y, z, SB ) + GRID_ENTRY( grid, x, y, z, ET )
				      + GRID_ENTRY( grid, x, y, z, EB ) + GRID_ENTRY( grid, x, y, z, WT )
				      + GRID_ENTRY( grid, x, y, z, WB );
				/* momentum components, normalized by rho below */
				ux  = + GRID_ENTRY( grid, x, y, z, E  ) - GRID_ENTRY( grid, x, y, z, W  )
				      + GRID_ENTRY( grid, x, y, z, NE ) - GRID_ENTRY( grid, x, y, z, NW )
				      + GRID_ENTRY( grid, x, y, z, SE ) - GRID_ENTRY( grid, x, y, z, SW )
				      + GRID_ENTRY( grid, x, y, z, ET ) + GRID_ENTRY( grid, x, y, z, EB )
				      - GRID_ENTRY( grid, x, y, z, WT ) - GRID_ENTRY( grid, x, y, z, WB );
				uy  = + GRID_ENTRY( grid, x, y, z, N  ) - GRID_ENTRY( grid, x, y, z, S  )
				      + GRID_ENTRY( grid, x, y, z, NE ) + GRID_ENTRY( grid, x, y, z, NW )
				      - GRID_ENTRY( grid, x, y, z, SE ) - GRID_ENTRY( grid, x, y, z, SW )
				      + GRID_ENTRY( grid, x, y, z, NT ) + GRID_ENTRY( grid, x, y, z, NB )
				      - GRID_ENTRY( grid, x, y, z, ST ) - GRID_ENTRY( grid, x, y, z, SB );
				uz  = + GRID_ENTRY( grid, x, y, z, T  ) - GRID_ENTRY( grid, x, y, z, B  )
				      + GRID_ENTRY( grid, x, y, z, NT ) - GRID_ENTRY( grid, x, y, z, NB )
				      + GRID_ENTRY( grid, x, y, z, ST ) - GRID_ENTRY( grid, x, y, z, SB )
				      + GRID_ENTRY( grid, x, y, z, ET ) - GRID_ENTRY( grid, x, y, z, EB )
				      + GRID_ENTRY( grid, x, y, z, WT ) - GRID_ENTRY( grid, x, y, z, WB );
				ux /= rho;
				uy /= rho;
				uz /= rho;

				if( binary ) {
					storeValue( file, &ux );
					storeValue( file, &uy );
					storeValue( file, &uz );
				} else
					fprintf( file, "%e %e %e\n", ux, uy, uz );
			}
		}
	}

	fclose( file );
}
/*############################################################################*/
/* Compare the grid's velocity field against a reference stored by
 * LBM_storeVelocityField and print the maximum per-cell difference
 * (non-SPEC builds also print OK / ERROR using a 1e-5 threshold).
 * NOTE(review): fopen and fscanf results are not checked -- a missing or
 * truncated file is read as garbage; consider validating as in
 * LBM_loadObstacleFile. */
void LBM_compareVelocityField( LBM_Grid grid, const char* filename,
const int binary ) {
int x, y, z;
float rho, ux, uy, uz;
OUTPUT_PRECISION fileUx, fileUy, fileUz,
dUx, dUy, dUz,
diff2, maxDiff2 = -1e+30;
FILE* file = fopen( filename, (binary ? "rb" : "r") );
for( z = 0; z < SIZE_Z; z++ ) {
for( y = 0; y < SIZE_Y; y++ ) {
for( x = 0; x < SIZE_X; x++ ) {
/* density: sum of all 19 distributions */
rho = + GRID_ENTRY( grid, x, y, z, C ) + GRID_ENTRY( grid, x, y, z, N )
+ GRID_ENTRY( grid, x, y, z, S ) + GRID_ENTRY( grid, x, y, z, E )
+ GRID_ENTRY( grid, x, y, z, W ) + GRID_ENTRY( grid, x, y, z, T )
+ GRID_ENTRY( grid, x, y, z, B ) + GRID_ENTRY( grid, x, y, z, NE )
+ GRID_ENTRY( grid, x, y, z, NW ) + GRID_ENTRY( grid, x, y, z, SE )
+ GRID_ENTRY( grid, x, y, z, SW ) + GRID_ENTRY( grid, x, y, z, NT )
+ GRID_ENTRY( grid, x, y, z, NB ) + GRID_ENTRY( grid, x, y, z, ST )
+ GRID_ENTRY( grid, x, y, z, SB ) + GRID_ENTRY( grid, x, y, z, ET )
+ GRID_ENTRY( grid, x, y, z, EB ) + GRID_ENTRY( grid, x, y, z, WT )
+ GRID_ENTRY( grid, x, y, z, WB );
/* momentum components, normalized by rho below */
ux = + GRID_ENTRY( grid, x, y, z, E ) - GRID_ENTRY( grid, x, y, z, W )
+ GRID_ENTRY( grid, x, y, z, NE ) - GRID_ENTRY( grid, x, y, z, NW )
+ GRID_ENTRY( grid, x, y, z, SE ) - GRID_ENTRY( grid, x, y, z, SW )
+ GRID_ENTRY( grid, x, y, z, ET ) + GRID_ENTRY( grid, x, y, z, EB )
- GRID_ENTRY( grid, x, y, z, WT ) - GRID_ENTRY( grid, x, y, z, WB );
uy = + GRID_ENTRY( grid, x, y, z, N ) - GRID_ENTRY( grid, x, y, z, S )
+ GRID_ENTRY( grid, x, y, z, NE ) + GRID_ENTRY( grid, x, y, z, NW )
- GRID_ENTRY( grid, x, y, z, SE ) - GRID_ENTRY( grid, x, y, z, SW )
+ GRID_ENTRY( grid, x, y, z, NT ) + GRID_ENTRY( grid, x, y, z, NB )
- GRID_ENTRY( grid, x, y, z, ST ) - GRID_ENTRY( grid, x, y, z, SB );
uz = + GRID_ENTRY( grid, x, y, z, T ) - GRID_ENTRY( grid, x, y, z, B )
+ GRID_ENTRY( grid, x, y, z, NT ) - GRID_ENTRY( grid, x, y, z, NB )
+ GRID_ENTRY( grid, x, y, z, ST ) - GRID_ENTRY( grid, x, y, z, SB )
+ GRID_ENTRY( grid, x, y, z, ET ) - GRID_ENTRY( grid, x, y, z, EB )
+ GRID_ENTRY( grid, x, y, z, WT ) - GRID_ENTRY( grid, x, y, z, WB );
ux /= rho;
uy /= rho;
uz /= rho;
if( binary ) {
loadValue( file, &fileUx );
loadValue( file, &fileUy );
loadValue( file, &fileUz );
}
else {
/* text mode: format string must match OUTPUT_PRECISION's size */
if( sizeof( OUTPUT_PRECISION ) == sizeof( double )) {
fscanf( file, "%lf %lf %lf\n", &fileUx, &fileUy, &fileUz );
}
else {
fscanf( file, "%f %f %f\n", &fileUx, &fileUy, &fileUz );
}
}
/* track the largest squared velocity difference */
dUx = ux - fileUx;
dUy = uy - fileUy;
dUz = uz - fileUz;
diff2 = dUx*dUx + dUy*dUy + dUz*dUz;
if( diff2 > maxDiff2 ) maxDiff2 = diff2;
}
}
}
#if defined(SPEC_CPU)
printf( "LBM_compareVelocityField: maxDiff = %e \n\n",
sqrt( maxDiff2 ) );
#else
printf( "LBM_compareVelocityField: maxDiff = %e ==> %s\n\n",
sqrt( maxDiff2 ),
sqrt( maxDiff2 ) > 1e-5 ? "##### ERROR #####" : "OK" );
#endif
fclose( file );
}
|
train_codebook.h | #pragma once
#include <limits>
namespace sq_hnswlib {
/**
 * Train a per-dimension scalar-quantization codebook for the vectors in
 * `filename`.  Each dimension is clustered independently into 256 1-D
 * k-means centers; every point then stores, per dimension, the index of
 * its nearest center.  The file is processed in two halves of dimensions
 * to bound peak memory.
 *
 * Data layout (from the call sites): the reader returns dimension-major
 * data, i.e. pdata[d * total_n + j] is dimension d of point j.
 *
 * @param filename  input vector file
 * @param hnswM, hnswefC, metric_type  currently unused here (kept for
 *        interface compatibility with the index-building caller)
 * @param read_meta  callback: filename -> (num points, num dimensions)
 * @param read_bin_file_half_dimension  callback reading dimensions
 *        [offset, offset + half) into a (re)allocated buffer
 * @param kmeans  callback: (count, data, dim, nclusters, centers-out)
 * @return newly allocated total_n x ndim codebook; caller owns (delete[])
 *
 * BUG FIXES vs. the original:
 *  - the std::function parameters were ill-formed (a bare argument list
 *    instead of a function type); signatures reconstructed from the call
 *    sites -- confirm against the actual callers;
 *  - `centers` leaked on every column and `codes` leaked on return;
 *  - the nearest-center index was extracted by reinterpreting the loop
 *    counter's first byte (little-endian only); since k < 256 a plain
 *    cast is equivalent and portable;
 *  - unused locals (pids, nids, nidsdim, npts2) removed.
 */
template <typename T>
uint8_t *train_codebook(
    const std::string &filename, const int hnswM, const int hnswefC,
    MetricType metric_type,
    std::function<void(const std::string &, uint32_t &, uint32_t &)> read_meta,
    std::function<void(const std::string &, T *&, uint32_t &, uint32_t &,
                       const uint32_t &)>
        read_bin_file_half_dimension,
    std::function<void(const int64_t, const T *, const int64_t, const int64_t,
                       float *&)>
        kmeans) {
  uint32_t npts = 0, ndim = 0;
  uint32_t total_n = 0;
  read_meta(filename, total_n, ndim);
  uint32_t half_dim = ndim / 2;

  float *pdata = new float[(uint64_t)total_n * (uint64_t)half_dim];
  // 256 centers per dimension, stored row-major as codes[center][dim]
  float *codes = new float[256 * (uint64_t)ndim];
  uint8_t *codebook = new uint8_t[(uint64_t)total_n * (uint64_t)ndim];
  memset(codebook, 0, sizeof(uint8_t) * (uint64_t)total_n * (uint64_t)ndim);

  // first half of the dimensions
  read_bin_file_half_dimension(filename, pdata, npts, ndim, 0);
#pragma omp parallel for
  for (uint32_t i = 0; i < half_dim; ++i) {
    float *centers = new float[256];
    kmeans(total_n, pdata + (uint64_t)i * total_n, 1, 256, centers);
    for (int j = 0; j < 256; j++) {
      codes[j * ndim + i] = centers[j];
    }
    // assign each point the index of its nearest 1-D center
    for (uint32_t j = 0; j < total_n; ++j) {
      float min_dis = std::numeric_limits<float>::max();
      for (uint32_t k = 0; k < 256; ++k) {
        float diff = codes[k * ndim + i] - pdata[(uint64_t)i * total_n + j];
        float now_dis = diff * diff;
        if (now_dis < min_dis) {
          min_dis = now_dis;
          codebook[(uint64_t)j * ndim + i] = (uint8_t)k;  // k < 256
        }
      }
    }
    delete[] centers;
  }
  delete[] pdata;
  pdata = nullptr;

  // second half of the dimensions; the reader reallocates pdata
  read_bin_file_half_dimension(filename, pdata, npts, ndim, half_dim);
#pragma omp parallel for
  for (uint32_t i = half_dim; i < ndim; ++i) {
    float *centers = new float[256];
    kmeans(total_n, pdata + (uint64_t)(i - half_dim) * total_n, 1, 256,
           centers);
    for (int j = 0; j < 256; j++) {
      codes[j * ndim + i] = centers[j];
    }
    for (uint32_t j = 0; j < total_n; ++j) {
      float min_dis = std::numeric_limits<float>::max();
      for (uint32_t k = 0; k < 256; ++k) {
        float diff =
            codes[k * ndim + i] - pdata[(uint64_t)(i - half_dim) * total_n + j];
        float now_dis = diff * diff;
        if (now_dis < min_dis) {
          min_dis = now_dis;
          codebook[(uint64_t)j * ndim + i] = (uint8_t)k;
        }
      }
    }
    delete[] centers;
  }
  delete[] pdata;
  pdata = nullptr;
  delete[] codes;

  return codebook;
}
} // namespace sq_hnswlib
|
h2theta.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "compearth.h"
/*!
* @brief Computes dip angle from h.
*
* @param[in] n Number of points in arrays.
* @param[in] h h in rectilinear space such that
* \f$ h \in [0,1] \f$.
* This is an array of dimension [n].
*
* @param[out] theta Dip angle (radians) such that
* \f$ \theta \in [0, \pi/2] \f$.
* This is an array of dimension [n].
*
* @author Ben Baker, ISTI
*
* @copyright MIT
*
*/
void compearth_h2theta(const int n, const double *__restrict__ h,
double *__restrict__ theta)
{
int i;
#pragma omp simd
for (i=0; i<n; i++)
{
theta[i] = acos(h[i]);
theta[i] = fmax(0.0, fmin(theta[i], M_PI_2));
}
return;
}
|
rhs4sgcurv.c | // SW4 LICENSE
// # ----------------------------------------------------------------------
// # SW4 - Seismic Waves, 4th order
// # ----------------------------------------------------------------------
// # Copyright (c) 2013, Lawrence Livermore National Security, LLC.
// # Produced at the Lawrence Livermore National Laboratory.
// #
// # Written by:
// # N. Anders Petersson (petersson1@llnl.gov)
// # Bjorn Sjogreen (sjogreen2@llnl.gov)
// #
// # LLNL-CODE-643337
// #
// # All rights reserved.
// #
// # This file is part of SW4, Version: 1.0
// #
// # Please also read LICENCE.txt, which contains "Our Notice and GNU General Public License"
// #
// # This program is free software; you can redistribute it and/or modify
// # it under the terms of the GNU General Public License (as published by
// # the Free Software Foundation) version 2, dated June 1991.
// #
// # This program is distributed in the hope that it will be useful, but
// # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
// # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
// # conditions of the GNU General Public License for more details.
// #
// # You should have received a copy of the GNU General Public License
// # along with this program; if not, write to the Free Software
// # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
#include "sw4.h"
void rhs4sgcurv( int ifirst, int ilast, int jfirst, int jlast, int kfirst, int klast,
float_sw4* __restrict__ a_u, float_sw4* __restrict__ a_mu, float_sw4* __restrict__ a_lambda,
float_sw4* __restrict__ a_met, float_sw4* __restrict__ a_jac, float_sw4* __restrict__ a_lu,
int* onesided, float_sw4* __restrict__ a_acof, float_sw4* __restrict__ a_bope,
float_sw4* __restrict__ a_ghcof, float_sw4* __restrict__ a_strx, float_sw4* __restrict__ a_stry )
{
// subroutine CURVILINEAR4SG( ifirst, ilast, jfirst, jlast, kfirst,
// * klast, u, mu, la, met, jac, lu,
// * onesided, acof, bope, ghcof, strx, stry,
// * op )
// Routine with supergrid stretchings strx and stry. No stretching
// in z, since top is always topography, and bottom always interface
// to a deeper Cartesian grid.
// opcount:
// Interior (k>6), 2126 arithmetic ops.
// Boundary discretization (1<=k<=6 ), 6049 arithmetic ops.
const float_sw4 a1 = 0;
const float_sw4 i6 = 1.0/6;
const float_sw4 tf = 0.75;
const float_sw4 c1 = 2.0/3;
const float_sw4 c2 = -1.0/12;
const int ni = ilast-ifirst+1;
const int nij = ni*(jlast-jfirst+1);
const int base = -(ifirst+ni*jfirst+nij*kfirst);
const int base3 = 3*base-1;
const int base4 = 4*base-1;
const int ni3 = 3*ni;
const int nij3 = 3*nij;
const int ni4 = 4*ni;
const int nij4 = 4*nij;
const int ifirst0 = ifirst;
const int jfirst0 = jfirst;
// Direct reuse of fortran code by these macro definitions:
#define mu(i,j,k) a_mu[base+i+ni*(j)+nij*(k)]
#define la(i,j,k) a_lambda[base+i+ni*(j)+nij*(k)]
#define jac(i,j,k) a_jac[base+i+ni*(j)+nij*(k)]
#define u(c,i,j,k) a_u[base3+(c)+3*(i)+ni3*(j)+nij3*(k)]
#define lu(c,i,j,k) a_lu[base3+(c)+3*(i)+ni3*(j)+nij3*(k)]
#define met(c,i,j,k) a_met[base4+(c)+4*(i)+ni4*(j)+nij4*(k)]
#define strx(i) a_strx[i-ifirst0]
#define stry(j) a_stry[j-jfirst0]
#define acof(i,j,k) a_acof[(i-1)+6*(j-1)+48*(k-1)]
#define bope(i,j) a_bope[i-1+6*(j-1)]
#define ghcof(i) a_ghcof[i-1]
#pragma omp parallel
{
int kstart = kfirst+2;
if( onesided[4] == 1 )
{
kstart = 7;
// SBP Boundary closure terms
#pragma omp for
for( int k= 1; k <= 6 ; k++ )
for( int j=jfirst+2; j <= jlast-2 ; j++ )
#pragma simd
#pragma ivdep
for( int i=ifirst+2; i <= ilast-2 ; i++ )
{
// 5 ops
float_sw4 ijac = strx(i)*stry(j)/jac(i,j,k);
float_sw4 istry = 1/(stry(j));
float_sw4 istrx = 1/(strx(i));
float_sw4 istrxy = istry*istrx;
float_sw4 r1 = 0,r2 = 0,r3 = 0;
// pp derivative (u) (u-eq)
// 53 ops, tot=58
float_sw4 cof1=(2*mu(i-2,j,k)+la(i-2,j,k))*met(1,i-2,j,k)*met(1,i-2,j,k)
*strx(i-2);
float_sw4 cof2=(2*mu(i-1,j,k)+la(i-1,j,k))*met(1,i-1,j,k)*met(1,i-1,j,k)
*strx(i-1);
float_sw4 cof3=(2*mu(i,j,k)+la(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*strx(i);
float_sw4 cof4=(2*mu(i+1,j,k)+la(i+1,j,k))*met(1,i+1,j,k)*met(1,i+1,j,k)
*strx(i+1);
float_sw4 cof5=(2*mu(i+2,j,k)+la(i+2,j,k))*met(1,i+2,j,k)*met(1,i+2,j,k)
*strx(i+2);
float_sw4 mux1 = cof2 -tf*(cof3+cof1);
float_sw4 mux2 = cof1 + cof4+3*(cof3+cof2);
float_sw4 mux3 = cof2 + cof5+3*(cof4+cof3);
float_sw4 mux4 = cof4-tf*(cof3+cof5);
r1 = r1 + i6* (
mux1*(u(1,i-2,j,k)-u(1,i,j,k)) +
mux2*(u(1,i-1,j,k)-u(1,i,j,k)) +
mux3*(u(1,i+1,j,k)-u(1,i,j,k)) +
mux4*(u(1,i+2,j,k)-u(1,i,j,k)) )*istry;
// qq derivative (u) (u-eq)
// 43 ops, tot=101
cof1=(mu(i,j-2,k))*met(1,i,j-2,k)*met(1,i,j-2,k)*stry(j-2);
cof2=(mu(i,j-1,k))*met(1,i,j-1,k)*met(1,i,j-1,k)*stry(j-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*stry(j);
cof4=(mu(i,j+1,k))*met(1,i,j+1,k)*met(1,i,j+1,k)*stry(j+1);
cof5=(mu(i,j+2,k))*met(1,i,j+2,k)*met(1,i,j+2,k)*stry(j+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r1 = r1 + i6* (
mux1*(u(1,i,j-2,k)-u(1,i,j,k)) +
mux2*(u(1,i,j-1,k)-u(1,i,j,k)) +
mux3*(u(1,i,j+1,k)-u(1,i,j,k)) +
mux4*(u(1,i,j+2,k)-u(1,i,j,k)) )*istrx;
// pp derivative (v) (v-eq)
// 43 ops, tot=144
cof1=(mu(i-2,j,k))*met(1,i-2,j,k)*met(1,i-2,j,k)*strx(i-2);
cof2=(mu(i-1,j,k))*met(1,i-1,j,k)*met(1,i-1,j,k)*strx(i-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*strx(i);
cof4=(mu(i+1,j,k))*met(1,i+1,j,k)*met(1,i+1,j,k)*strx(i+1);
cof5=(mu(i+2,j,k))*met(1,i+2,j,k)*met(1,i+2,j,k)*strx(i+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 = r2 + i6* (
mux1*(u(2,i-2,j,k)-u(2,i,j,k)) +
mux2*(u(2,i-1,j,k)-u(2,i,j,k)) +
mux3*(u(2,i+1,j,k)-u(2,i,j,k)) +
mux4*(u(2,i+2,j,k)-u(2,i,j,k)) )*istry;
// qq derivative (v) (v-eq)
// 53 ops, tot=197
cof1=(2*mu(i,j-2,k)+la(i,j-2,k))*met(1,i,j-2,k)*met(1,i,j-2,k)*stry(j-2);
cof2=(2*mu(i,j-1,k)+la(i,j-1,k))*met(1,i,j-1,k)*met(1,i,j-1,k)*stry(j-1);
cof3=(2*mu(i,j,k)+la(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*stry(j);
cof4=(2*mu(i,j+1,k)+la(i,j+1,k))*met(1,i,j+1,k)*met(1,i,j+1,k)*stry(j+1);
cof5=(2*mu(i,j+2,k)+la(i,j+2,k))*met(1,i,j+2,k)*met(1,i,j+2,k)*stry(j+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 = r2 + i6* (
mux1*(u(2,i,j-2,k)-u(2,i,j,k)) +
mux2*(u(2,i,j-1,k)-u(2,i,j,k)) +
mux3*(u(2,i,j+1,k)-u(2,i,j,k)) +
mux4*(u(2,i,j+2,k)-u(2,i,j,k)) )*istrx;
// pp derivative (w) (w-eq)
// 43 ops, tot=240
cof1=(mu(i-2,j,k))*met(1,i-2,j,k)*met(1,i-2,j,k)*strx(i-2);
cof2=(mu(i-1,j,k))*met(1,i-1,j,k)*met(1,i-1,j,k)*strx(i-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*strx(i);
cof4=(mu(i+1,j,k))*met(1,i+1,j,k)*met(1,i+1,j,k)*strx(i+1);
cof5=(mu(i+2,j,k))*met(1,i+2,j,k)*met(1,i+2,j,k)*strx(i+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 = r3 + i6* (
mux1*(u(3,i-2,j,k)-u(3,i,j,k)) +
mux2*(u(3,i-1,j,k)-u(3,i,j,k)) +
mux3*(u(3,i+1,j,k)-u(3,i,j,k)) +
mux4*(u(3,i+2,j,k)-u(3,i,j,k)) )*istry;
// qq derivative (w) (w-eq)
// 43 ops, tot=283
cof1=(mu(i,j-2,k))*met(1,i,j-2,k)*met(1,i,j-2,k)*stry(j-2);
cof2=(mu(i,j-1,k))*met(1,i,j-1,k)*met(1,i,j-1,k)*stry(j-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*stry(j);
cof4=(mu(i,j+1,k))*met(1,i,j+1,k)*met(1,i,j+1,k)*stry(j+1);
cof5=(mu(i,j+2,k))*met(1,i,j+2,k)*met(1,i,j+2,k)*stry(j+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 = r3 + i6* (
mux1*(u(3,i,j-2,k)-u(3,i,j,k)) +
mux2*(u(3,i,j-1,k)-u(3,i,j,k)) +
mux3*(u(3,i,j+1,k)-u(3,i,j,k)) +
mux4*(u(3,i,j+2,k)-u(3,i,j,k)) )*istrx;
// All rr-derivatives at once
// averaging the coefficient
// 54*8*8+25*8 = 3656 ops, tot=3939
float_sw4 mucofu2, mucofuv, mucofuw, mucofvw, mucofv2, mucofw2;
for( int q=1 ; q <= 8 ; q++ )
{
mucofu2=0;
mucofuv=0;
mucofuw=0;
mucofvw=0;
mucofv2=0;
mucofw2=0;
for( int m=1 ; m <= 8 ; m++ )
{
mucofu2 +=
acof(k,q,m)*(
(2*mu(i,j,m)+la(i,j,m) )*met(2,i,j,m)*strx(i)*met(2,i,j,m)*strx(i)
+ mu(i,j,m)*(met(3,i,j,m)*stry(j)*met(3,i,j,m)*stry(j)+
met(4,i,j,m)*met(4,i,j,m) )
);
mucofv2 +=
acof(k,q,m)*(
(2*mu(i,j,m)+la(i,j,m) )*met(3,i,j,m)*stry(j)*met(3,i,j,m)*stry(j)
+ mu(i,j,m)*(met(2,i,j,m)*strx(i)*met(2,i,j,m)*strx(i)+
met(4,i,j,m)*met(4,i,j,m) )
);
mucofw2 +=
acof(k,q,m)*((2*mu(i,j,m)+la(i,j,m))*met(4,i,j,m)*met(4,i,j,m)
+ mu(i,j,m)*( met(2,i,j,m)*strx(i)*met(2,i,j,m)*strx(i)+
met(3,i,j,m)*stry(j)*met(3,i,j,m)*stry(j) ) );
mucofuv += acof(k,q,m)*(mu(i,j,m)+la(i,j,m))*met(2,i,j,m)*met(3,i,j,m);
mucofuw += acof(k,q,m)*(mu(i,j,m)+la(i,j,m))*met(2,i,j,m)*met(4,i,j,m);
mucofvw += acof(k,q,m)*(mu(i,j,m)+la(i,j,m))*met(3,i,j,m)*met(4,i,j,m);
}
// Computing the second derivative,
r1 += istrxy*mucofu2*u(1,i,j,q) + mucofuv*u(2,i,j,q) + istry*mucofuw*u(3,i,j,q);
r2 += mucofuv*u(1,i,j,q) + istrxy*mucofv2*u(2,i,j,q) + istrx*mucofvw*u(3,i,j,q);
r3 += istry*mucofuw*u(1,i,j,q) + istrx*mucofvw*u(2,i,j,q) + istrxy*mucofw2*u(3,i,j,q);
}
// Ghost point values, only nonzero for k=1.
// 72 ops., tot=4011
mucofu2 = ghcof(k)*((2*mu(i,j,1)+la(i,j,1))*
met(2,i,j,1)*strx(i)*met(2,i,j,1)*strx(i)
+ mu(i,j,1)*(met(3,i,j,1)*stry(j)*met(3,i,j,1)*stry(j)+
met(4,i,j,1)*met(4,i,j,1) ));
mucofv2 = ghcof(k)*((2*mu(i,j,1)+la(i,j,1))*
met(3,i,j,1)*stry(j)*met(3,i,j,1)*stry(j)
+ mu(i,j,1)*( met(2,i,j,1)*strx(i)*met(2,i,j,1)*strx(i)+
met(4,i,j,1)*met(4,i,j,1) ) );
mucofw2 = ghcof(k)*((2*mu(i,j,1)+la(i,j,1))*met(4,i,j,1)*met(4,i,j,1)
+ mu(i,j,1)*
( met(2,i,j,1)*strx(i)*met(2,i,j,1)*strx(i)+
met(3,i,j,1)*stry(j)*met(3,i,j,1)*stry(j) ) );
mucofuv = ghcof(k)*(mu(i,j,1)+la(i,j,1))*met(2,i,j,1)*met(3,i,j,1);
mucofuw = ghcof(k)*(mu(i,j,1)+la(i,j,1))*met(2,i,j,1)*met(4,i,j,1);
mucofvw = ghcof(k)*(mu(i,j,1)+la(i,j,1))*met(3,i,j,1)*met(4,i,j,1);
r1 += istrxy*mucofu2*u(1,i,j,0) + mucofuv*u(2,i,j,0) + istry*mucofuw*u(3,i,j,0);
r2 += mucofuv*u(1,i,j,0) + istrxy*mucofv2*u(2,i,j,0) + istrx*mucofvw*u(3,i,j,0);
r3 += istry*mucofuw*u(1,i,j,0) + istrx*mucofvw*u(2,i,j,0) + istrxy*mucofw2*u(3,i,j,0);
// pq-derivatives (u-eq)
// 38 ops., tot=4049
r1 +=
c2*( mu(i,j+2,k)*met(1,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(2,i+2,j+2,k)-u(2,i-2,j+2,k)) +
c1*(u(2,i+1,j+2,k)-u(2,i-1,j+2,k)) )
- mu(i,j-2,k)*met(1,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(2,i+2,j-2,k)-u(2,i-2,j-2,k))+
c1*(u(2,i+1,j-2,k)-u(2,i-1,j-2,k)) )
) +
c1*( mu(i,j+1,k)*met(1,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(2,i+2,j+1,k)-u(2,i-2,j+1,k)) +
c1*(u(2,i+1,j+1,k)-u(2,i-1,j+1,k)) )
- mu(i,j-1,k)*met(1,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(2,i+2,j-1,k)-u(2,i-2,j-1,k)) +
c1*(u(2,i+1,j-1,k)-u(2,i-1,j-1,k))));
// qp-derivatives (u-eq)
// 38 ops. tot=4087
r1 +=
c2*( la(i+2,j,k)*met(1,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(2,i+2,j+2,k)-u(2,i+2,j-2,k)) +
c1*(u(2,i+2,j+1,k)-u(2,i+2,j-1,k)) )
- la(i-2,j,k)*met(1,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(2,i-2,j+2,k)-u(2,i-2,j-2,k))+
c1*(u(2,i-2,j+1,k)-u(2,i-2,j-1,k)) )
) +
c1*( la(i+1,j,k)*met(1,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(2,i+1,j+2,k)-u(2,i+1,j-2,k)) +
c1*(u(2,i+1,j+1,k)-u(2,i+1,j-1,k)) )
- la(i-1,j,k)*met(1,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(2,i-1,j+2,k)-u(2,i-1,j-2,k)) +
c1*(u(2,i-1,j+1,k)-u(2,i-1,j-1,k))));
// pq-derivatives (v-eq)
// 38 ops. , tot=4125
r2 +=
c2*( la(i,j+2,k)*met(1,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(1,i+2,j+2,k)-u(1,i-2,j+2,k)) +
c1*(u(1,i+1,j+2,k)-u(1,i-1,j+2,k)) )
- la(i,j-2,k)*met(1,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(1,i+2,j-2,k)-u(1,i-2,j-2,k))+
c1*(u(1,i+1,j-2,k)-u(1,i-1,j-2,k)) )
) +
c1*( la(i,j+1,k)*met(1,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(1,i+2,j+1,k)-u(1,i-2,j+1,k)) +
c1*(u(1,i+1,j+1,k)-u(1,i-1,j+1,k)) )
- la(i,j-1,k)*met(1,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(1,i+2,j-1,k)-u(1,i-2,j-1,k)) +
c1*(u(1,i+1,j-1,k)-u(1,i-1,j-1,k))));
//* qp-derivatives (v-eq)
// 38 ops., tot=4163
r2 +=
c2*( mu(i+2,j,k)*met(1,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(1,i+2,j+2,k)-u(1,i+2,j-2,k)) +
c1*(u(1,i+2,j+1,k)-u(1,i+2,j-1,k)) )
- mu(i-2,j,k)*met(1,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(1,i-2,j+2,k)-u(1,i-2,j-2,k))+
c1*(u(1,i-2,j+1,k)-u(1,i-2,j-1,k)) )
) +
c1*( mu(i+1,j,k)*met(1,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(1,i+1,j+2,k)-u(1,i+1,j-2,k)) +
c1*(u(1,i+1,j+1,k)-u(1,i+1,j-1,k)) )
- mu(i-1,j,k)*met(1,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(1,i-1,j+2,k)-u(1,i-1,j-2,k)) +
c1*(u(1,i-1,j+1,k)-u(1,i-1,j-1,k))));
// rp - derivatives
// 24*8 = 192 ops, tot=4355
float_sw4 dudrm2 = 0, dudrm1=0, dudrp1=0, dudrp2=0;
float_sw4 dvdrm2 = 0, dvdrm1=0, dvdrp1=0, dvdrp2=0;
float_sw4 dwdrm2 = 0, dwdrm1=0, dwdrp1=0, dwdrp2=0;
for( int q=1 ; q <= 8 ; q++ )
{
dudrm2 += bope(k,q)*u(1,i-2,j,q);
dvdrm2 += bope(k,q)*u(2,i-2,j,q);
dwdrm2 += bope(k,q)*u(3,i-2,j,q);
dudrm1 += bope(k,q)*u(1,i-1,j,q);
dvdrm1 += bope(k,q)*u(2,i-1,j,q);
dwdrm1 += bope(k,q)*u(3,i-1,j,q);
dudrp2 += bope(k,q)*u(1,i+2,j,q);
dvdrp2 += bope(k,q)*u(2,i+2,j,q);
dwdrp2 += bope(k,q)*u(3,i+2,j,q);
dudrp1 += bope(k,q)*u(1,i+1,j,q);
dvdrp1 += bope(k,q)*u(2,i+1,j,q);
dwdrp1 += bope(k,q)*u(3,i+1,j,q);
}
// rp derivatives (u-eq)
// 67 ops, tot=4422
r1 += ( c2*(
(2*mu(i+2,j,k)+la(i+2,j,k))*met(2,i+2,j,k)*met(1,i+2,j,k)*
strx(i+2)*dudrp2
+ la(i+2,j,k)*met(3,i+2,j,k)*met(1,i+2,j,k)*dvdrp2*stry(j)
+ la(i+2,j,k)*met(4,i+2,j,k)*met(1,i+2,j,k)*dwdrp2
-((2*mu(i-2,j,k)+la(i-2,j,k))*met(2,i-2,j,k)*met(1,i-2,j,k)*
strx(i-2)*dudrm2
+ la(i-2,j,k)*met(3,i-2,j,k)*met(1,i-2,j,k)*dvdrm2*stry(j)
+ la(i-2,j,k)*met(4,i-2,j,k)*met(1,i-2,j,k)*dwdrm2 )
) + c1*(
(2*mu(i+1,j,k)+la(i+1,j,k))*met(2,i+1,j,k)*met(1,i+1,j,k)*
strx(i+1)*dudrp1
+ la(i+1,j,k)*met(3,i+1,j,k)*met(1,i+1,j,k)*dvdrp1*stry(j)
+ la(i+1,j,k)*met(4,i+1,j,k)*met(1,i+1,j,k)*dwdrp1
-((2*mu(i-1,j,k)+la(i-1,j,k))*met(2,i-1,j,k)*met(1,i-1,j,k)*
strx(i-1)*dudrm1
+ la(i-1,j,k)*met(3,i-1,j,k)*met(1,i-1,j,k)*dvdrm1*stry(j)
+ la(i-1,j,k)*met(4,i-1,j,k)*met(1,i-1,j,k)*dwdrm1 ) ) )*istry;
// rp derivatives (v-eq)
// 42 ops, tot=4464
r2 += c2*(
mu(i+2,j,k)*met(3,i+2,j,k)*met(1,i+2,j,k)*dudrp2
+ mu(i+2,j,k)*met(2,i+2,j,k)*met(1,i+2,j,k)*dvdrp2*
strx(i+2)*istry
- (mu(i-2,j,k)*met(3,i-2,j,k)*met(1,i-2,j,k)*dudrm2
+ mu(i-2,j,k)*met(2,i-2,j,k)*met(1,i-2,j,k)*dvdrm2*
strx(i-2)*istry )
) + c1*(
mu(i+1,j,k)*met(3,i+1,j,k)*met(1,i+1,j,k)*dudrp1
+ mu(i+1,j,k)*met(2,i+1,j,k)*met(1,i+1,j,k)*dvdrp1*
strx(i+1)*istry
- (mu(i-1,j,k)*met(3,i-1,j,k)*met(1,i-1,j,k)*dudrm1
+ mu(i-1,j,k)*met(2,i-1,j,k)*met(1,i-1,j,k)*dvdrm1*
strx(i-1)*istry )
);
// rp derivatives (w-eq)
// 38 ops, tot=4502
r3 += istry*(c2*(
mu(i+2,j,k)*met(4,i+2,j,k)*met(1,i+2,j,k)*dudrp2
+ mu(i+2,j,k)*met(2,i+2,j,k)*met(1,i+2,j,k)*dwdrp2*strx(i+2)
- (mu(i-2,j,k)*met(4,i-2,j,k)*met(1,i-2,j,k)*dudrm2
+ mu(i-2,j,k)*met(2,i-2,j,k)*met(1,i-2,j,k)*dwdrm2*strx(i-2))
) + c1*(
mu(i+1,j,k)*met(4,i+1,j,k)*met(1,i+1,j,k)*dudrp1
+ mu(i+1,j,k)*met(2,i+1,j,k)*met(1,i+1,j,k)*dwdrp1*strx(i+1)
- (mu(i-1,j,k)*met(4,i-1,j,k)*met(1,i-1,j,k)*dudrm1
+ mu(i-1,j,k)*met(2,i-1,j,k)*met(1,i-1,j,k)*dwdrm1*strx(i-1))
) );
// rq - derivatives
// 24*8 = 192 ops , tot=4694
dudrm2 = 0;
dudrm1 = 0;
dudrp1 = 0;
dudrp2 = 0;
dvdrm2 = 0;
dvdrm1 = 0;
dvdrp1 = 0;
dvdrp2 = 0;
dwdrm2 = 0;
dwdrm1 = 0;
dwdrp1 = 0;
dwdrp2 = 0;
for( int q=1 ; q <= 8 ; q++ )
{
dudrm2 += bope(k,q)*u(1,i,j-2,q);
dvdrm2 += bope(k,q)*u(2,i,j-2,q);
dwdrm2 += bope(k,q)*u(3,i,j-2,q);
dudrm1 += bope(k,q)*u(1,i,j-1,q);
dvdrm1 += bope(k,q)*u(2,i,j-1,q);
dwdrm1 += bope(k,q)*u(3,i,j-1,q);
dudrp2 += bope(k,q)*u(1,i,j+2,q);
dvdrp2 += bope(k,q)*u(2,i,j+2,q);
dwdrp2 += bope(k,q)*u(3,i,j+2,q);
dudrp1 += bope(k,q)*u(1,i,j+1,q);
dvdrp1 += bope(k,q)*u(2,i,j+1,q);
dwdrp1 += bope(k,q)*u(3,i,j+1,q);
}
// rq derivatives (u-eq)
// 42 ops, tot=4736
r1 += c2*(
mu(i,j+2,k)*met(3,i,j+2,k)*met(1,i,j+2,k)*dudrp2*
stry(j+2)*istrx
+ mu(i,j+2,k)*met(2,i,j+2,k)*met(1,i,j+2,k)*dvdrp2
- (mu(i,j-2,k)*met(3,i,j-2,k)*met(1,i,j-2,k)*dudrm2*
stry(j-2)*istrx
+ mu(i,j-2,k)*met(2,i,j-2,k)*met(1,i,j-2,k)*dvdrm2)
) + c1*(
mu(i,j+1,k)*met(3,i,j+1,k)*met(1,i,j+1,k)*dudrp1*
stry(j+1)*istrx
+ mu(i,j+1,k)*met(2,i,j+1,k)*met(1,i,j+1,k)*dvdrp1
- (mu(i,j-1,k)*met(3,i,j-1,k)*met(1,i,j-1,k)*dudrm1*
stry(j-1)*istrx
+ mu(i,j-1,k)*met(2,i,j-1,k)*met(1,i,j-1,k)*dvdrm1)
);
// rq derivatives (v-eq)
// 70 ops, tot=4806
r2 += c2*(
la(i,j+2,k)*met(2,i,j+2,k)*met(1,i,j+2,k)*dudrp2
+(2*mu(i,j+2,k)+la(i,j+2,k))*met(3,i,j+2,k)*met(1,i,j+2,k)*dvdrp2
*stry(j+2)*istrx
+ la(i,j+2,k)*met(4,i,j+2,k)*met(1,i,j+2,k)*dwdrp2*istrx
- ( la(i,j-2,k)*met(2,i,j-2,k)*met(1,i,j-2,k)*dudrm2
+(2*mu(i,j-2,k)+la(i,j-2,k))*met(3,i,j-2,k)*met(1,i,j-2,k)*dvdrm2
*stry(j-2)*istrx
+ la(i,j-2,k)*met(4,i,j-2,k)*met(1,i,j-2,k)*dwdrm2*istrx )
) + c1*(
la(i,j+1,k)*met(2,i,j+1,k)*met(1,i,j+1,k)*dudrp1
+(2*mu(i,j+1,k)+la(i,j+1,k))*met(3,i,j+1,k)*met(1,i,j+1,k)*dvdrp1
*stry(j+1)*istrx
+ la(i,j+1,k)*met(4,i,j+1,k)*met(1,i,j+1,k)*dwdrp1*istrx
- ( la(i,j-1,k)*met(2,i,j-1,k)*met(1,i,j-1,k)*dudrm1
+(2*mu(i,j-1,k)+la(i,j-1,k))*met(3,i,j-1,k)*met(1,i,j-1,k)*dvdrm1
*stry(j-1)*istrx
+ la(i,j-1,k)*met(4,i,j-1,k)*met(1,i,j-1,k)*dwdrm1*istrx )
);
// rq derivatives (w-eq)
// 39 ops, tot=4845
r3 += ( c2*(
mu(i,j+2,k)*met(3,i,j+2,k)*met(1,i,j+2,k)*dwdrp2*stry(j+2)
+ mu(i,j+2,k)*met(4,i,j+2,k)*met(1,i,j+2,k)*dvdrp2
- (mu(i,j-2,k)*met(3,i,j-2,k)*met(1,i,j-2,k)*dwdrm2*stry(j-2)
+ mu(i,j-2,k)*met(4,i,j-2,k)*met(1,i,j-2,k)*dvdrm2)
) + c1*(
mu(i,j+1,k)*met(3,i,j+1,k)*met(1,i,j+1,k)*dwdrp1*stry(j+1)
+ mu(i,j+1,k)*met(4,i,j+1,k)*met(1,i,j+1,k)*dvdrp1
- (mu(i,j-1,k)*met(3,i,j-1,k)*met(1,i,j-1,k)*dwdrm1*stry(j-1)
+ mu(i,j-1,k)*met(4,i,j-1,k)*met(1,i,j-1,k)*dvdrm1)
) )*istrx;
// pr and qr derivatives at once
// in loop: 8*(53+53+43) = 1192 ops, tot=6037
for( int q=1 ; q <= 8 ; q++ )
{
// (u-eq)
// 53 ops
r1 += bope(k,q)*(
// pr
(2*mu(i,j,q)+la(i,j,q))*met(2,i,j,q)*met(1,i,j,q)*(
c2*(u(1,i+2,j,q)-u(1,i-2,j,q)) +
c1*(u(1,i+1,j,q)-u(1,i-1,j,q)) )*strx(i)*istry
+ mu(i,j,q)*met(3,i,j,q)*met(1,i,j,q)*(
c2*(u(2,i+2,j,q)-u(2,i-2,j,q)) +
c1*(u(2,i+1,j,q)-u(2,i-1,j,q)) )
+ mu(i,j,q)*met(4,i,j,q)*met(1,i,j,q)*(
c2*(u(3,i+2,j,q)-u(3,i-2,j,q)) +
c1*(u(3,i+1,j,q)-u(3,i-1,j,q)) )*istry
// qr
+ mu(i,j,q)*met(3,i,j,q)*met(1,i,j,q)*(
c2*(u(1,i,j+2,q)-u(1,i,j-2,q)) +
c1*(u(1,i,j+1,q)-u(1,i,j-1,q)) )*stry(j)*istrx
+ la(i,j,q)*met(2,i,j,q)*met(1,i,j,q)*(
c2*(u(2,i,j+2,q)-u(2,i,j-2,q)) +
c1*(u(2,i,j+1,q)-u(2,i,j-1,q)) ) );
// (v-eq)
// 53 ops
r2 += bope(k,q)*(
// pr
la(i,j,q)*met(3,i,j,q)*met(1,i,j,q)*(
c2*(u(1,i+2,j,q)-u(1,i-2,j,q)) +
c1*(u(1,i+1,j,q)-u(1,i-1,j,q)) )
+ mu(i,j,q)*met(2,i,j,q)*met(1,i,j,q)*(
c2*(u(2,i+2,j,q)-u(2,i-2,j,q)) +
c1*(u(2,i+1,j,q)-u(2,i-1,j,q)) )*strx(i)*istry
// qr
+ mu(i,j,q)*met(2,i,j,q)*met(1,i,j,q)*(
c2*(u(1,i,j+2,q)-u(1,i,j-2,q)) +
c1*(u(1,i,j+1,q)-u(1,i,j-1,q)) )
+ (2*mu(i,j,q)+la(i,j,q))*met(3,i,j,q)*met(1,i,j,q)*(
c2*(u(2,i,j+2,q)-u(2,i,j-2,q)) +
c1*(u(2,i,j+1,q)-u(2,i,j-1,q)) )*stry(j)*istrx
+ mu(i,j,q)*met(4,i,j,q)*met(1,i,j,q)*(
c2*(u(3,i,j+2,q)-u(3,i,j-2,q)) +
c1*(u(3,i,j+1,q)-u(3,i,j-1,q)) )*istrx );
// (w-eq)
// 43 ops
r3 += bope(k,q)*(
// pr
la(i,j,q)*met(4,i,j,q)*met(1,i,j,q)*(
c2*(u(1,i+2,j,q)-u(1,i-2,j,q)) +
c1*(u(1,i+1,j,q)-u(1,i-1,j,q)) )*istry
+ mu(i,j,q)*met(2,i,j,q)*met(1,i,j,q)*(
c2*(u(3,i+2,j,q)-u(3,i-2,j,q)) +
c1*(u(3,i+1,j,q)-u(3,i-1,j,q)) )*strx(i)*istry
// qr
+ mu(i,j,q)*met(3,i,j,q)*met(1,i,j,q)*(
c2*(u(3,i,j+2,q)-u(3,i,j-2,q)) +
c1*(u(3,i,j+1,q)-u(3,i,j-1,q)) )*stry(j)*istrx
+ la(i,j,q)*met(4,i,j,q)*met(1,i,j,q)*(
c2*(u(2,i,j+2,q)-u(2,i,j-2,q)) +
c1*(u(2,i,j+1,q)-u(2,i,j-1,q)) )*istrx );
}
// 12 ops, tot=6049
lu(1,i,j,k) = a1*lu(1,i,j,k) + r1*ijac;
lu(2,i,j,k) = a1*lu(2,i,j,k) + r2*ijac;
lu(3,i,j,k) = a1*lu(3,i,j,k) + r3*ijac;
}
}
#pragma omp for
for( int k= kstart; k <= klast-2 ; k++ )
for( int j=jfirst+2; j <= jlast-2 ; j++ )
#pragma simd
#pragma ivdep
for( int i=ifirst+2; i <= ilast-2 ; i++ )
{
// 5 ops
float_sw4 ijac = strx(i)*stry(j)/jac(i,j,k);
float_sw4 istry = 1/(stry(j));
float_sw4 istrx = 1/(strx(i));
float_sw4 istrxy = istry*istrx;
float_sw4 r1 = 0, r2=0, r3=0;
// pp derivative (u)
// 53 ops, tot=58
float_sw4 cof1=(2*mu(i-2,j,k)+la(i-2,j,k))*met(1,i-2,j,k)*met(1,i-2,j,k)
*strx(i-2);
float_sw4 cof2=(2*mu(i-1,j,k)+la(i-1,j,k))*met(1,i-1,j,k)*met(1,i-1,j,k)
*strx(i-1);
float_sw4 cof3=(2*mu(i,j,k)+la(i,j,k))*met(1,i,j,k)*met(1,i,j,k)
*strx(i);
float_sw4 cof4=(2*mu(i+1,j,k)+la(i+1,j,k))*met(1,i+1,j,k)*met(1,i+1,j,k)
*strx(i+1);
float_sw4 cof5=(2*mu(i+2,j,k)+la(i+2,j,k))*met(1,i+2,j,k)*met(1,i+2,j,k)
*strx(i+2);
float_sw4 mux1 = cof2 -tf*(cof3+cof1);
float_sw4 mux2 = cof1 + cof4+3*(cof3+cof2);
float_sw4 mux3 = cof2 + cof5+3*(cof4+cof3);
float_sw4 mux4 = cof4-tf*(cof3+cof5);
r1 += i6* (
mux1*(u(1,i-2,j,k)-u(1,i,j,k)) +
mux2*(u(1,i-1,j,k)-u(1,i,j,k)) +
mux3*(u(1,i+1,j,k)-u(1,i,j,k)) +
mux4*(u(1,i+2,j,k)-u(1,i,j,k)) )*istry;
// qq derivative (u)
// 43 ops, tot=101
cof1=(mu(i,j-2,k))*met(1,i,j-2,k)*met(1,i,j-2,k)*stry(j-2);
cof2=(mu(i,j-1,k))*met(1,i,j-1,k)*met(1,i,j-1,k)*stry(j-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*stry(j);
cof4=(mu(i,j+1,k))*met(1,i,j+1,k)*met(1,i,j+1,k)*stry(j+1);
cof5=(mu(i,j+2,k))*met(1,i,j+2,k)*met(1,i,j+2,k)*stry(j+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r1 += i6* (
mux1*(u(1,i,j-2,k)-u(1,i,j,k)) +
mux2*(u(1,i,j-1,k)-u(1,i,j,k)) +
mux3*(u(1,i,j+1,k)-u(1,i,j,k)) +
mux4*(u(1,i,j+2,k)-u(1,i,j,k)) )*istrx;
// rr derivative (u)
// 5*11+14+14=83 ops, tot=184
cof1 = (2*mu(i,j,k-2)+la(i,j,k-2))*met(2,i,j,k-2)*strx(i)*met(2,i,j,k-2)*strx(i)
+ mu(i,j,k-2)*(met(3,i,j,k-2)*stry(j)*met(3,i,j,k-2)*stry(j)+
met(4,i,j,k-2)*met(4,i,j,k-2));
cof2 = (2*mu(i,j,k-1)+la(i,j,k-1))*met(2,i,j,k-1)*strx(i)*met(2,i,j,k-1)*strx(i)
+ mu(i,j,k-1)*(met(3,i,j,k-1)*stry(j)*met(3,i,j,k-1)*stry(j)+
met(4,i,j,k-1)*met(4,i,j,k-1) );
cof3 = (2*mu(i,j,k)+la(i,j,k))*met(2,i,j,k)*strx(i)*met(2,i,j,k)*strx(i) +
mu(i,j,k)*(met(3,i,j,k)*stry(j)*met(3,i,j,k)*stry(j)+
met(4,i,j,k)*met(4,i,j,k));
cof4 = (2*mu(i,j,k+1)+la(i,j,k+1))*met(2,i,j,k+1)*strx(i)*met(2,i,j,k+1)*strx(i)
+ mu(i,j,k+1)*(met(3,i,j,k+1)*stry(j)*met(3,i,j,k+1)*stry(j)+
met(4,i,j,k+1)*met(4,i,j,k+1));
cof5 = (2*mu(i,j,k+2)+la(i,j,k+2))*met(2,i,j,k+2)*strx(i)*met(2,i,j,k+2)*strx(i)
+ mu(i,j,k+2)*( met(3,i,j,k+2)*stry(j)*met(3,i,j,k+2)*stry(j)+
met(4,i,j,k+2)*met(4,i,j,k+2));
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r1 += i6* (
mux1*(u(1,i,j,k-2)-u(1,i,j,k)) +
mux2*(u(1,i,j,k-1)-u(1,i,j,k)) +
mux3*(u(1,i,j,k+1)-u(1,i,j,k)) +
mux4*(u(1,i,j,k+2)-u(1,i,j,k)) )*istrxy;
// rr derivative (v)
// 42 ops, tot=226
cof1=(mu(i,j,k-2)+la(i,j,k-2))*met(2,i,j,k-2)*met(3,i,j,k-2);
cof2=(mu(i,j,k-1)+la(i,j,k-1))*met(2,i,j,k-1)*met(3,i,j,k-1);
cof3=(mu(i,j,k)+la(i,j,k))*met(2,i,j,k)*met(3,i,j,k);
cof4=(mu(i,j,k+1)+la(i,j,k+1))*met(2,i,j,k+1)*met(3,i,j,k+1);
cof5=(mu(i,j,k+2)+la(i,j,k+2))*met(2,i,j,k+2)*met(3,i,j,k+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r1 += i6* (
mux1*(u(2,i,j,k-2)-u(2,i,j,k)) +
mux2*(u(2,i,j,k-1)-u(2,i,j,k)) +
mux3*(u(2,i,j,k+1)-u(2,i,j,k)) +
mux4*(u(2,i,j,k+2)-u(2,i,j,k)) );
// rr derivative (w)
// 43 ops, tot=269
cof1=(mu(i,j,k-2)+la(i,j,k-2))*met(2,i,j,k-2)*met(4,i,j,k-2);
cof2=(mu(i,j,k-1)+la(i,j,k-1))*met(2,i,j,k-1)*met(4,i,j,k-1);
cof3=(mu(i,j,k)+la(i,j,k))*met(2,i,j,k)*met(4,i,j,k);
cof4=(mu(i,j,k+1)+la(i,j,k+1))*met(2,i,j,k+1)*met(4,i,j,k+1);
cof5=(mu(i,j,k+2)+la(i,j,k+2))*met(2,i,j,k+2)*met(4,i,j,k+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r1 += i6* (
mux1*(u(3,i,j,k-2)-u(3,i,j,k)) +
mux2*(u(3,i,j,k-1)-u(3,i,j,k)) +
mux3*(u(3,i,j,k+1)-u(3,i,j,k)) +
mux4*(u(3,i,j,k+2)-u(3,i,j,k)) )*istry;
// pq-derivatives
// 38 ops, tot=307
r1 +=
c2*( mu(i,j+2,k)*met(1,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(2,i+2,j+2,k)-u(2,i-2,j+2,k)) +
c1*(u(2,i+1,j+2,k)-u(2,i-1,j+2,k)) )
- mu(i,j-2,k)*met(1,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(2,i+2,j-2,k)-u(2,i-2,j-2,k))+
c1*(u(2,i+1,j-2,k)-u(2,i-1,j-2,k)) )
) +
c1*( mu(i,j+1,k)*met(1,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(2,i+2,j+1,k)-u(2,i-2,j+1,k)) +
c1*(u(2,i+1,j+1,k)-u(2,i-1,j+1,k)) )
- mu(i,j-1,k)*met(1,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(2,i+2,j-1,k)-u(2,i-2,j-1,k)) +
c1*(u(2,i+1,j-1,k)-u(2,i-1,j-1,k))));
// qp-derivatives
// 38 ops, tot=345
r1 +=
c2*( la(i+2,j,k)*met(1,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(2,i+2,j+2,k)-u(2,i+2,j-2,k)) +
c1*(u(2,i+2,j+1,k)-u(2,i+2,j-1,k)) )
- la(i-2,j,k)*met(1,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(2,i-2,j+2,k)-u(2,i-2,j-2,k))+
c1*(u(2,i-2,j+1,k)-u(2,i-2,j-1,k)) )
) +
c1*( la(i+1,j,k)*met(1,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(2,i+1,j+2,k)-u(2,i+1,j-2,k)) +
c1*(u(2,i+1,j+1,k)-u(2,i+1,j-1,k)) )
- la(i-1,j,k)*met(1,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(2,i-1,j+2,k)-u(2,i-1,j-2,k)) +
c1*(u(2,i-1,j+1,k)-u(2,i-1,j-1,k))));
// pr-derivatives
// 130 ops., tot=475
r1 += c2*(
(2*mu(i,j,k+2)+la(i,j,k+2))*met(2,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(1,i+2,j,k+2)-u(1,i-2,j,k+2)) +
c1*(u(1,i+1,j,k+2)-u(1,i-1,j,k+2)) )*strx(i)*istry
+ mu(i,j,k+2)*met(3,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(2,i+2,j,k+2)-u(2,i-2,j,k+2)) +
c1*(u(2,i+1,j,k+2)-u(2,i-1,j,k+2)) )
+ mu(i,j,k+2)*met(4,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(3,i+2,j,k+2)-u(3,i-2,j,k+2)) +
c1*(u(3,i+1,j,k+2)-u(3,i-1,j,k+2)) )*istry
- ((2*mu(i,j,k-2)+la(i,j,k-2))*met(2,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(1,i+2,j,k-2)-u(1,i-2,j,k-2)) +
c1*(u(1,i+1,j,k-2)-u(1,i-1,j,k-2)) )*strx(i)*istry
+ mu(i,j,k-2)*met(3,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(2,i+2,j,k-2)-u(2,i-2,j,k-2)) +
c1*(u(2,i+1,j,k-2)-u(2,i-1,j,k-2)) )
+ mu(i,j,k-2)*met(4,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(3,i+2,j,k-2)-u(3,i-2,j,k-2)) +
c1*(u(3,i+1,j,k-2)-u(3,i-1,j,k-2)) )*istry )
) + c1*(
(2*mu(i,j,k+1)+la(i,j,k+1))*met(2,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(1,i+2,j,k+1)-u(1,i-2,j,k+1)) +
c1*(u(1,i+1,j,k+1)-u(1,i-1,j,k+1)) )*strx(i)*istry
+ mu(i,j,k+1)*met(3,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(2,i+2,j,k+1)-u(2,i-2,j,k+1)) +
c1*(u(2,i+1,j,k+1)-u(2,i-1,j,k+1)) )
+ mu(i,j,k+1)*met(4,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(3,i+2,j,k+1)-u(3,i-2,j,k+1)) +
c1*(u(3,i+1,j,k+1)-u(3,i-1,j,k+1)) )*istry
- ((2*mu(i,j,k-1)+la(i,j,k-1))*met(2,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(1,i+2,j,k-1)-u(1,i-2,j,k-1)) +
c1*(u(1,i+1,j,k-1)-u(1,i-1,j,k-1)) )*strx(i)*istry
+ mu(i,j,k-1)*met(3,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(2,i+2,j,k-1)-u(2,i-2,j,k-1)) +
c1*(u(2,i+1,j,k-1)-u(2,i-1,j,k-1)) )
+ mu(i,j,k-1)*met(4,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(3,i+2,j,k-1)-u(3,i-2,j,k-1)) +
c1*(u(3,i+1,j,k-1)-u(3,i-1,j,k-1)) )*istry ) );
// rp derivatives
// 130 ops, tot=605
r1 += ( c2*(
(2*mu(i+2,j,k)+la(i+2,j,k))*met(2,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(1,i+2,j,k+2)-u(1,i+2,j,k-2)) +
c1*(u(1,i+2,j,k+1)-u(1,i+2,j,k-1)) )*strx(i+2)
+ la(i+2,j,k)*met(3,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(2,i+2,j,k+2)-u(2,i+2,j,k-2)) +
c1*(u(2,i+2,j,k+1)-u(2,i+2,j,k-1)) )*stry(j)
+ la(i+2,j,k)*met(4,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(3,i+2,j,k+2)-u(3,i+2,j,k-2)) +
c1*(u(3,i+2,j,k+1)-u(3,i+2,j,k-1)) )
- ((2*mu(i-2,j,k)+la(i-2,j,k))*met(2,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(1,i-2,j,k+2)-u(1,i-2,j,k-2)) +
c1*(u(1,i-2,j,k+1)-u(1,i-2,j,k-1)) )*strx(i-2)
+ la(i-2,j,k)*met(3,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(2,i-2,j,k+2)-u(2,i-2,j,k-2)) +
c1*(u(2,i-2,j,k+1)-u(2,i-2,j,k-1)) )*stry(j)
+ la(i-2,j,k)*met(4,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(3,i-2,j,k+2)-u(3,i-2,j,k-2)) +
c1*(u(3,i-2,j,k+1)-u(3,i-2,j,k-1)) ) )
) + c1*(
(2*mu(i+1,j,k)+la(i+1,j,k))*met(2,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(1,i+1,j,k+2)-u(1,i+1,j,k-2)) +
c1*(u(1,i+1,j,k+1)-u(1,i+1,j,k-1)) )*strx(i+1)
+ la(i+1,j,k)*met(3,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(2,i+1,j,k+2)-u(2,i+1,j,k-2)) +
c1*(u(2,i+1,j,k+1)-u(2,i+1,j,k-1)) )*stry(j)
+ la(i+1,j,k)*met(4,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(3,i+1,j,k+2)-u(3,i+1,j,k-2)) +
c1*(u(3,i+1,j,k+1)-u(3,i+1,j,k-1)) )
- ((2*mu(i-1,j,k)+la(i-1,j,k))*met(2,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(1,i-1,j,k+2)-u(1,i-1,j,k-2)) +
c1*(u(1,i-1,j,k+1)-u(1,i-1,j,k-1)) )*strx(i-1)
+ la(i-1,j,k)*met(3,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(2,i-1,j,k+2)-u(2,i-1,j,k-2)) +
c1*(u(2,i-1,j,k+1)-u(2,i-1,j,k-1)) )*stry(j)
+ la(i-1,j,k)*met(4,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(3,i-1,j,k+2)-u(3,i-1,j,k-2)) +
c1*(u(3,i-1,j,k+1)-u(3,i-1,j,k-1)) ) ) ) )*istry;
// qr derivatives
// 82 ops, tot=687
r1 += c2*(
mu(i,j,k+2)*met(3,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(1,i,j+2,k+2)-u(1,i,j-2,k+2)) +
c1*(u(1,i,j+1,k+2)-u(1,i,j-1,k+2)) )*stry(j)*istrx
+ la(i,j,k+2)*met(2,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(2,i,j+2,k+2)-u(2,i,j-2,k+2)) +
c1*(u(2,i,j+1,k+2)-u(2,i,j-1,k+2)) )
- ( mu(i,j,k-2)*met(3,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(1,i,j+2,k-2)-u(1,i,j-2,k-2)) +
c1*(u(1,i,j+1,k-2)-u(1,i,j-1,k-2)) )*stry(j)*istrx
+ la(i,j,k-2)*met(2,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(2,i,j+2,k-2)-u(2,i,j-2,k-2)) +
c1*(u(2,i,j+1,k-2)-u(2,i,j-1,k-2)) ) )
) + c1*(
mu(i,j,k+1)*met(3,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(1,i,j+2,k+1)-u(1,i,j-2,k+1)) +
c1*(u(1,i,j+1,k+1)-u(1,i,j-1,k+1)) )*stry(j)*istrx
+ la(i,j,k+1)*met(2,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(2,i,j+2,k+1)-u(2,i,j-2,k+1)) +
c1*(u(2,i,j+1,k+1)-u(2,i,j-1,k+1)) )
- ( mu(i,j,k-1)*met(3,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(1,i,j+2,k-1)-u(1,i,j-2,k-1)) +
c1*(u(1,i,j+1,k-1)-u(1,i,j-1,k-1)) )*stry(j)*istrx
+ la(i,j,k-1)*met(2,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(2,i,j+2,k-1)-u(2,i,j-2,k-1)) +
c1*(u(2,i,j+1,k-1)-u(2,i,j-1,k-1)) ) ) );
// rq derivatives
// 82 ops, tot=769
r1 += c2*(
mu(i,j+2,k)*met(3,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(1,i,j+2,k+2)-u(1,i,j+2,k-2)) +
c1*(u(1,i,j+2,k+1)-u(1,i,j+2,k-1)) )*stry(j+2)*istrx
+ mu(i,j+2,k)*met(2,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(2,i,j+2,k+2)-u(2,i,j+2,k-2)) +
c1*(u(2,i,j+2,k+1)-u(2,i,j+2,k-1)) )
- ( mu(i,j-2,k)*met(3,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(1,i,j-2,k+2)-u(1,i,j-2,k-2)) +
c1*(u(1,i,j-2,k+1)-u(1,i,j-2,k-1)) )*stry(j-2)*istrx
+ mu(i,j-2,k)*met(2,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(2,i,j-2,k+2)-u(2,i,j-2,k-2)) +
c1*(u(2,i,j-2,k+1)-u(2,i,j-2,k-1)) ) )
) + c1*(
mu(i,j+1,k)*met(3,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(1,i,j+1,k+2)-u(1,i,j+1,k-2)) +
c1*(u(1,i,j+1,k+1)-u(1,i,j+1,k-1)) )*stry(j+1)*istrx
+ mu(i,j+1,k)*met(2,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(2,i,j+1,k+2)-u(2,i,j+1,k-2)) +
c1*(u(2,i,j+1,k+1)-u(2,i,j+1,k-1)) )
- ( mu(i,j-1,k)*met(3,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(1,i,j-1,k+2)-u(1,i,j-1,k-2)) +
c1*(u(1,i,j-1,k+1)-u(1,i,j-1,k-1)) )*stry(j-1)*istrx
+ mu(i,j-1,k)*met(2,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(2,i,j-1,k+2)-u(2,i,j-1,k-2)) +
c1*(u(2,i,j-1,k+1)-u(2,i,j-1,k-1)) ) ) );
// 4 ops, tot=773
lu(1,i,j,k) = a1*lu(1,i,j,k) + r1*ijac;
// v-equation
// r1 = 0;
// pp derivative (v)
// 43 ops, tot=816
cof1=(mu(i-2,j,k))*met(1,i-2,j,k)*met(1,i-2,j,k)*strx(i-2);
cof2=(mu(i-1,j,k))*met(1,i-1,j,k)*met(1,i-1,j,k)*strx(i-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*strx(i);
cof4=(mu(i+1,j,k))*met(1,i+1,j,k)*met(1,i+1,j,k)*strx(i+1);
cof5=(mu(i+2,j,k))*met(1,i+2,j,k)*met(1,i+2,j,k)*strx(i+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 += i6* (
mux1*(u(2,i-2,j,k)-u(2,i,j,k)) +
mux2*(u(2,i-1,j,k)-u(2,i,j,k)) +
mux3*(u(2,i+1,j,k)-u(2,i,j,k)) +
mux4*(u(2,i+2,j,k)-u(2,i,j,k)) )*istry;
// qq derivative (v)
// 53 ops, tot=869
cof1=(2*mu(i,j-2,k)+la(i,j-2,k))*met(1,i,j-2,k)*met(1,i,j-2,k)
*stry(j-2);
cof2=(2*mu(i,j-1,k)+la(i,j-1,k))*met(1,i,j-1,k)*met(1,i,j-1,k)
*stry(j-1);
cof3=(2*mu(i,j,k)+la(i,j,k))*met(1,i,j,k)*met(1,i,j,k)
*stry(j);
cof4=(2*mu(i,j+1,k)+la(i,j+1,k))*met(1,i,j+1,k)*met(1,i,j+1,k)
*stry(j+1);
cof5=(2*mu(i,j+2,k)+la(i,j+2,k))*met(1,i,j+2,k)*met(1,i,j+2,k)
*stry(j+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 += i6* (
mux1*(u(2,i,j-2,k)-u(2,i,j,k)) +
mux2*(u(2,i,j-1,k)-u(2,i,j,k)) +
mux3*(u(2,i,j+1,k)-u(2,i,j,k)) +
mux4*(u(2,i,j+2,k)-u(2,i,j,k)) )*istrx;
// rr derivative (u)
// 42 ops, tot=911
cof1=(mu(i,j,k-2)+la(i,j,k-2))*met(2,i,j,k-2)*met(3,i,j,k-2);
cof2=(mu(i,j,k-1)+la(i,j,k-1))*met(2,i,j,k-1)*met(3,i,j,k-1);
cof3=(mu(i,j,k)+ la(i,j,k) )*met(2,i,j,k)* met(3,i,j,k);
cof4=(mu(i,j,k+1)+la(i,j,k+1))*met(2,i,j,k+1)*met(3,i,j,k+1);
cof5=(mu(i,j,k+2)+la(i,j,k+2))*met(2,i,j,k+2)*met(3,i,j,k+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 += i6* (
mux1*(u(1,i,j,k-2)-u(1,i,j,k)) +
mux2*(u(1,i,j,k-1)-u(1,i,j,k)) +
mux3*(u(1,i,j,k+1)-u(1,i,j,k)) +
mux4*(u(1,i,j,k+2)-u(1,i,j,k)) );
// rr derivative (v)
// 83 ops, tot=994
cof1 = (2*mu(i,j,k-2)+la(i,j,k-2))*met(3,i,j,k-2)*stry(j)*met(3,i,j,k-2)*stry(j)
+ mu(i,j,k-2)*(met(2,i,j,k-2)*strx(i)*met(2,i,j,k-2)*strx(i)+
met(4,i,j,k-2)*met(4,i,j,k-2));
cof2 = (2*mu(i,j,k-1)+la(i,j,k-1))*met(3,i,j,k-1)*stry(j)*met(3,i,j,k-1)*stry(j)
+ mu(i,j,k-1)*(met(2,i,j,k-1)*strx(i)*met(2,i,j,k-1)*strx(i)+
met(4,i,j,k-1)*met(4,i,j,k-1));
cof3 = (2*mu(i,j,k)+la(i,j,k))*met(3,i,j,k)*stry(j)*met(3,i,j,k)*stry(j) +
mu(i,j,k)*(met(2,i,j,k)*strx(i)*met(2,i,j,k)*strx(i)+
met(4,i,j,k)*met(4,i,j,k));
cof4 = (2*mu(i,j,k+1)+la(i,j,k+1))*met(3,i,j,k+1)*stry(j)*met(3,i,j,k+1)*stry(j)
+ mu(i,j,k+1)*(met(2,i,j,k+1)*strx(i)*met(2,i,j,k+1)*strx(i)+
met(4,i,j,k+1)*met(4,i,j,k+1));
cof5 = (2*mu(i,j,k+2)+la(i,j,k+2))*met(3,i,j,k+2)*stry(j)*met(3,i,j,k+2)*stry(j)
+ mu(i,j,k+2)*(met(2,i,j,k+2)*strx(i)*met(2,i,j,k+2)*strx(i)+
met(4,i,j,k+2)*met(4,i,j,k+2));
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 += i6* (
mux1*(u(2,i,j,k-2)-u(2,i,j,k)) +
mux2*(u(2,i,j,k-1)-u(2,i,j,k)) +
mux3*(u(2,i,j,k+1)-u(2,i,j,k)) +
mux4*(u(2,i,j,k+2)-u(2,i,j,k)) )*istrxy;
// rr derivative (w)
// 43 ops, tot=1037
cof1=(mu(i,j,k-2)+la(i,j,k-2))*met(3,i,j,k-2)*met(4,i,j,k-2);
cof2=(mu(i,j,k-1)+la(i,j,k-1))*met(3,i,j,k-1)*met(4,i,j,k-1);
cof3=(mu(i,j,k) +la(i,j,k) )*met(3,i,j,k)* met(4,i,j,k);
cof4=(mu(i,j,k+1)+la(i,j,k+1))*met(3,i,j,k+1)*met(4,i,j,k+1);
cof5=(mu(i,j,k+2)+la(i,j,k+2))*met(3,i,j,k+2)*met(4,i,j,k+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 += i6* (
mux1*(u(3,i,j,k-2)-u(3,i,j,k)) +
mux2*(u(3,i,j,k-1)-u(3,i,j,k)) +
mux3*(u(3,i,j,k+1)-u(3,i,j,k)) +
mux4*(u(3,i,j,k+2)-u(3,i,j,k)) )*istrx;
// pq-derivatives
// 38 ops, tot=1075
r2 +=
c2*( la(i,j+2,k)*met(1,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(1,i+2,j+2,k)-u(1,i-2,j+2,k)) +
c1*(u(1,i+1,j+2,k)-u(1,i-1,j+2,k)) )
- la(i,j-2,k)*met(1,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(1,i+2,j-2,k)-u(1,i-2,j-2,k))+
c1*(u(1,i+1,j-2,k)-u(1,i-1,j-2,k)) )
) +
c1*( la(i,j+1,k)*met(1,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(1,i+2,j+1,k)-u(1,i-2,j+1,k)) +
c1*(u(1,i+1,j+1,k)-u(1,i-1,j+1,k)) )
- la(i,j-1,k)*met(1,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(1,i+2,j-1,k)-u(1,i-2,j-1,k)) +
c1*(u(1,i+1,j-1,k)-u(1,i-1,j-1,k))));
// qp-derivatives
// 38 ops, tot=1113
r2 +=
c2*( mu(i+2,j,k)*met(1,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(1,i+2,j+2,k)-u(1,i+2,j-2,k)) +
c1*(u(1,i+2,j+1,k)-u(1,i+2,j-1,k)) )
- mu(i-2,j,k)*met(1,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(1,i-2,j+2,k)-u(1,i-2,j-2,k))+
c1*(u(1,i-2,j+1,k)-u(1,i-2,j-1,k)) )
) +
c1*( mu(i+1,j,k)*met(1,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(1,i+1,j+2,k)-u(1,i+1,j-2,k)) +
c1*(u(1,i+1,j+1,k)-u(1,i+1,j-1,k)) )
- mu(i-1,j,k)*met(1,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(1,i-1,j+2,k)-u(1,i-1,j-2,k)) +
c1*(u(1,i-1,j+1,k)-u(1,i-1,j-1,k))));
// pr-derivatives
// 82 ops, tot=1195
r2 += c2*(
(la(i,j,k+2))*met(3,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(1,i+2,j,k+2)-u(1,i-2,j,k+2)) +
c1*(u(1,i+1,j,k+2)-u(1,i-1,j,k+2)) )
+ mu(i,j,k+2)*met(2,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(2,i+2,j,k+2)-u(2,i-2,j,k+2)) +
c1*(u(2,i+1,j,k+2)-u(2,i-1,j,k+2)) )*strx(i)*istry
- ((la(i,j,k-2))*met(3,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(1,i+2,j,k-2)-u(1,i-2,j,k-2)) +
c1*(u(1,i+1,j,k-2)-u(1,i-1,j,k-2)) )
+ mu(i,j,k-2)*met(2,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(2,i+2,j,k-2)-u(2,i-2,j,k-2)) +
c1*(u(2,i+1,j,k-2)-u(2,i-1,j,k-2)) )*strx(i)*istry )
) + c1*(
(la(i,j,k+1))*met(3,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(1,i+2,j,k+1)-u(1,i-2,j,k+1)) +
c1*(u(1,i+1,j,k+1)-u(1,i-1,j,k+1)) )
+ mu(i,j,k+1)*met(2,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(2,i+2,j,k+1)-u(2,i-2,j,k+1)) +
c1*(u(2,i+1,j,k+1)-u(2,i-1,j,k+1)) )*strx(i)*istry
- (la(i,j,k-1)*met(3,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(1,i+2,j,k-1)-u(1,i-2,j,k-1)) +
c1*(u(1,i+1,j,k-1)-u(1,i-1,j,k-1)) )
+ mu(i,j,k-1)*met(2,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(2,i+2,j,k-1)-u(2,i-2,j,k-1)) +
c1*(u(2,i+1,j,k-1)-u(2,i-1,j,k-1)) )*strx(i)*istry ) );
// rp derivatives
// 82 ops, tot=1277
r2 += c2*(
(mu(i+2,j,k))*met(3,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(1,i+2,j,k+2)-u(1,i+2,j,k-2)) +
c1*(u(1,i+2,j,k+1)-u(1,i+2,j,k-1)) )
+ mu(i+2,j,k)*met(2,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(2,i+2,j,k+2)-u(2,i+2,j,k-2)) +
c1*(u(2,i+2,j,k+1)-u(2,i+2,j,k-1)) )*strx(i+2)*istry
- (mu(i-2,j,k)*met(3,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(1,i-2,j,k+2)-u(1,i-2,j,k-2)) +
c1*(u(1,i-2,j,k+1)-u(1,i-2,j,k-1)) )
+ mu(i-2,j,k)*met(2,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(2,i-2,j,k+2)-u(2,i-2,j,k-2)) +
c1*(u(2,i-2,j,k+1)-u(2,i-2,j,k-1)) )*strx(i-2)*istry )
) + c1*(
(mu(i+1,j,k))*met(3,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(1,i+1,j,k+2)-u(1,i+1,j,k-2)) +
c1*(u(1,i+1,j,k+1)-u(1,i+1,j,k-1)) )
+ mu(i+1,j,k)*met(2,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(2,i+1,j,k+2)-u(2,i+1,j,k-2)) +
c1*(u(2,i+1,j,k+1)-u(2,i+1,j,k-1)) )*strx(i+1)*istry
- (mu(i-1,j,k)*met(3,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(1,i-1,j,k+2)-u(1,i-1,j,k-2)) +
c1*(u(1,i-1,j,k+1)-u(1,i-1,j,k-1)) )
+ mu(i-1,j,k)*met(2,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(2,i-1,j,k+2)-u(2,i-1,j,k-2)) +
c1*(u(2,i-1,j,k+1)-u(2,i-1,j,k-1)) )*strx(i-1)*istry ) );
// qr derivatives
// 130 ops, tot=1407
r2 += c2*(
mu(i,j,k+2)*met(2,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(1,i,j+2,k+2)-u(1,i,j-2,k+2)) +
c1*(u(1,i,j+1,k+2)-u(1,i,j-1,k+2)) )
+ (2*mu(i,j,k+2)+la(i,j,k+2))*met(3,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(2,i,j+2,k+2)-u(2,i,j-2,k+2)) +
c1*(u(2,i,j+1,k+2)-u(2,i,j-1,k+2)) )*stry(j)*istrx
+mu(i,j,k+2)*met(4,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(3,i,j+2,k+2)-u(3,i,j-2,k+2)) +
c1*(u(3,i,j+1,k+2)-u(3,i,j-1,k+2)) )*istrx
- ( mu(i,j,k-2)*met(2,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(1,i,j+2,k-2)-u(1,i,j-2,k-2)) +
c1*(u(1,i,j+1,k-2)-u(1,i,j-1,k-2)) )
+(2*mu(i,j,k-2)+ la(i,j,k-2))*met(3,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(2,i,j+2,k-2)-u(2,i,j-2,k-2)) +
c1*(u(2,i,j+1,k-2)-u(2,i,j-1,k-2)) )*stry(j)*istrx +
mu(i,j,k-2)*met(4,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(3,i,j+2,k-2)-u(3,i,j-2,k-2)) +
c1*(u(3,i,j+1,k-2)-u(3,i,j-1,k-2)) )*istrx )
) + c1*(
mu(i,j,k+1)*met(2,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(1,i,j+2,k+1)-u(1,i,j-2,k+1)) +
c1*(u(1,i,j+1,k+1)-u(1,i,j-1,k+1)) )
+ (2*mu(i,j,k+1)+la(i,j,k+1))*met(3,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(2,i,j+2,k+1)-u(2,i,j-2,k+1)) +
c1*(u(2,i,j+1,k+1)-u(2,i,j-1,k+1)) )*stry(j)*istrx
+ mu(i,j,k+1)*met(4,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(3,i,j+2,k+1)-u(3,i,j-2,k+1)) +
c1*(u(3,i,j+1,k+1)-u(3,i,j-1,k+1)) )*istrx
- ( mu(i,j,k-1)*met(2,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(1,i,j+2,k-1)-u(1,i,j-2,k-1)) +
c1*(u(1,i,j+1,k-1)-u(1,i,j-1,k-1)) )
+ (2*mu(i,j,k-1)+la(i,j,k-1))*met(3,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(2,i,j+2,k-1)-u(2,i,j-2,k-1)) +
c1*(u(2,i,j+1,k-1)-u(2,i,j-1,k-1)) )*stry(j)*istrx
+ mu(i,j,k-1)*met(4,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(3,i,j+2,k-1)-u(3,i,j-2,k-1)) +
c1*(u(3,i,j+1,k-1)-u(3,i,j-1,k-1)) )*istrx ) );
// rq derivatives
// 130 ops, tot=1537
r2 += c2*(
la(i,j+2,k)*met(2,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(1,i,j+2,k+2)-u(1,i,j+2,k-2)) +
c1*(u(1,i,j+2,k+1)-u(1,i,j+2,k-1)) )
+(2*mu(i,j+2,k)+la(i,j+2,k))*met(3,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(2,i,j+2,k+2)-u(2,i,j+2,k-2)) +
c1*(u(2,i,j+2,k+1)-u(2,i,j+2,k-1)) )*stry(j+2)*istrx
+ la(i,j+2,k)*met(4,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(3,i,j+2,k+2)-u(3,i,j+2,k-2)) +
c1*(u(3,i,j+2,k+1)-u(3,i,j+2,k-1)) )*istrx
- ( la(i,j-2,k)*met(2,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(1,i,j-2,k+2)-u(1,i,j-2,k-2)) +
c1*(u(1,i,j-2,k+1)-u(1,i,j-2,k-1)) )
+(2*mu(i,j-2,k)+la(i,j-2,k))*met(3,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(2,i,j-2,k+2)-u(2,i,j-2,k-2)) +
c1*(u(2,i,j-2,k+1)-u(2,i,j-2,k-1)) )*stry(j-2)*istrx
+ la(i,j-2,k)*met(4,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(3,i,j-2,k+2)-u(3,i,j-2,k-2)) +
c1*(u(3,i,j-2,k+1)-u(3,i,j-2,k-1)) )*istrx )
) + c1*(
la(i,j+1,k)*met(2,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(1,i,j+1,k+2)-u(1,i,j+1,k-2)) +
c1*(u(1,i,j+1,k+1)-u(1,i,j+1,k-1)) )
+ (2*mu(i,j+1,k)+la(i,j+1,k))*met(3,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(2,i,j+1,k+2)-u(2,i,j+1,k-2)) +
c1*(u(2,i,j+1,k+1)-u(2,i,j+1,k-1)) )*stry(j+1)*istrx
+la(i,j+1,k)*met(4,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(3,i,j+1,k+2)-u(3,i,j+1,k-2)) +
c1*(u(3,i,j+1,k+1)-u(3,i,j+1,k-1)) )*istrx
- ( la(i,j-1,k)*met(2,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(1,i,j-1,k+2)-u(1,i,j-1,k-2)) +
c1*(u(1,i,j-1,k+1)-u(1,i,j-1,k-1)) )
+ (2*mu(i,j-1,k)+la(i,j-1,k))*met(3,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(2,i,j-1,k+2)-u(2,i,j-1,k-2)) +
c1*(u(2,i,j-1,k+1)-u(2,i,j-1,k-1)) )*stry(j-1)*istrx
+ la(i,j-1,k)*met(4,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(3,i,j-1,k+2)-u(3,i,j-1,k-2)) +
c1*(u(3,i,j-1,k+1)-u(3,i,j-1,k-1)) )*istrx ) );
// 4 ops, tot=1541
lu(2,i,j,k) = a1*lu(2,i,j,k) + r2*ijac;
// w-equation
// r1 = 0;
// pp derivative (w)
// 43 ops, tot=1580
cof1=(mu(i-2,j,k))*met(1,i-2,j,k)*met(1,i-2,j,k)*strx(i-2);
cof2=(mu(i-1,j,k))*met(1,i-1,j,k)*met(1,i-1,j,k)*strx(i-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*strx(i);
cof4=(mu(i+1,j,k))*met(1,i+1,j,k)*met(1,i+1,j,k)*strx(i+1);
cof5=(mu(i+2,j,k))*met(1,i+2,j,k)*met(1,i+2,j,k)*strx(i+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 += i6* (
mux1*(u(3,i-2,j,k)-u(3,i,j,k)) +
mux2*(u(3,i-1,j,k)-u(3,i,j,k)) +
mux3*(u(3,i+1,j,k)-u(3,i,j,k)) +
mux4*(u(3,i+2,j,k)-u(3,i,j,k)) )*istry;
// qq derivative (w)
// 43 ops, tot=1623
cof1=(mu(i,j-2,k))*met(1,i,j-2,k)*met(1,i,j-2,k)*stry(j-2);
cof2=(mu(i,j-1,k))*met(1,i,j-1,k)*met(1,i,j-1,k)*stry(j-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*stry(j);
cof4=(mu(i,j+1,k))*met(1,i,j+1,k)*met(1,i,j+1,k)*stry(j+1);
cof5=(mu(i,j+2,k))*met(1,i,j+2,k)*met(1,i,j+2,k)*stry(j+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 += i6* (
mux1*(u(3,i,j-2,k)-u(3,i,j,k)) +
mux2*(u(3,i,j-1,k)-u(3,i,j,k)) +
mux3*(u(3,i,j+1,k)-u(3,i,j,k)) +
mux4*(u(3,i,j+2,k)-u(3,i,j,k)) )*istrx;
// rr derivative (u)
// 43 ops, tot=1666
cof1=(mu(i,j,k-2)+la(i,j,k-2))*met(2,i,j,k-2)*met(4,i,j,k-2);
cof2=(mu(i,j,k-1)+la(i,j,k-1))*met(2,i,j,k-1)*met(4,i,j,k-1);
cof3=(mu(i,j,k)+la(i,j,k))*met(2,i,j,k)*met(4,i,j,k);
cof4=(mu(i,j,k+1)+la(i,j,k+1))*met(2,i,j,k+1)*met(4,i,j,k+1);
cof5=(mu(i,j,k+2)+la(i,j,k+2))*met(2,i,j,k+2)*met(4,i,j,k+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 += i6* (
mux1*(u(1,i,j,k-2)-u(1,i,j,k)) +
mux2*(u(1,i,j,k-1)-u(1,i,j,k)) +
mux3*(u(1,i,j,k+1)-u(1,i,j,k)) +
mux4*(u(1,i,j,k+2)-u(1,i,j,k)) )*istry;
// rr derivative (v)
// 43 ops, tot=1709
cof1=(mu(i,j,k-2)+la(i,j,k-2))*met(3,i,j,k-2)*met(4,i,j,k-2);
cof2=(mu(i,j,k-1)+la(i,j,k-1))*met(3,i,j,k-1)*met(4,i,j,k-1);
cof3=(mu(i,j,k)+la(i,j,k))*met(3,i,j,k)*met(4,i,j,k);
cof4=(mu(i,j,k+1)+la(i,j,k+1))*met(3,i,j,k+1)*met(4,i,j,k+1);
cof5=(mu(i,j,k+2)+la(i,j,k+2))*met(3,i,j,k+2)*met(4,i,j,k+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 += i6* (
mux1*(u(2,i,j,k-2)-u(2,i,j,k)) +
mux2*(u(2,i,j,k-1)-u(2,i,j,k)) +
mux3*(u(2,i,j,k+1)-u(2,i,j,k)) +
mux4*(u(2,i,j,k+2)-u(2,i,j,k)) )*istrx;
// rr derivative (w)
// 83 ops, tot=1792
cof1 = (2*mu(i,j,k-2)+la(i,j,k-2))*met(4,i,j,k-2)*met(4,i,j,k-2) +
mu(i,j,k-2)*(met(2,i,j,k-2)*strx(i)*met(2,i,j,k-2)*strx(i)+
met(3,i,j,k-2)*stry(j)*met(3,i,j,k-2)*stry(j) );
cof2 = (2*mu(i,j,k-1)+la(i,j,k-1))*met(4,i,j,k-1)*met(4,i,j,k-1) +
mu(i,j,k-1)*(met(2,i,j,k-1)*strx(i)*met(2,i,j,k-1)*strx(i)+
met(3,i,j,k-1)*stry(j)*met(3,i,j,k-1)*stry(j) );
cof3 = (2*mu(i,j,k)+la(i,j,k))*met(4,i,j,k)*met(4,i,j,k) +
mu(i,j,k)*(met(2,i,j,k)*strx(i)*met(2,i,j,k)*strx(i)+
met(3,i,j,k)*stry(j)*met(3,i,j,k)*stry(j) );
cof4 = (2*mu(i,j,k+1)+la(i,j,k+1))*met(4,i,j,k+1)*met(4,i,j,k+1) +
mu(i,j,k+1)*(met(2,i,j,k+1)*strx(i)*met(2,i,j,k+1)*strx(i)+
met(3,i,j,k+1)*stry(j)*met(3,i,j,k+1)*stry(j));
cof5 = (2*mu(i,j,k+2)+la(i,j,k+2))*met(4,i,j,k+2)*met(4,i,j,k+2) +
mu(i,j,k+2)*( met(2,i,j,k+2)*strx(i)*met(2,i,j,k+2)*strx(i)+
met(3,i,j,k+2)*stry(j)*met(3,i,j,k+2)*stry(j) );
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 += i6* (
mux1*(u(3,i,j,k-2)-u(3,i,j,k)) +
mux2*(u(3,i,j,k-1)-u(3,i,j,k)) +
mux3*(u(3,i,j,k+1)-u(3,i,j,k)) +
mux4*(u(3,i,j,k+2)-u(3,i,j,k)) )*istrxy
// pr-derivatives
// 86 ops, tot=1878
// r1 +=
+ c2*(
(la(i,j,k+2))*met(4,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(1,i+2,j,k+2)-u(1,i-2,j,k+2)) +
c1*(u(1,i+1,j,k+2)-u(1,i-1,j,k+2)) )*istry
+ mu(i,j,k+2)*met(2,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(3,i+2,j,k+2)-u(3,i-2,j,k+2)) +
c1*(u(3,i+1,j,k+2)-u(3,i-1,j,k+2)) )*strx(i)*istry
- ((la(i,j,k-2))*met(4,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(1,i+2,j,k-2)-u(1,i-2,j,k-2)) +
c1*(u(1,i+1,j,k-2)-u(1,i-1,j,k-2)) )*istry
+ mu(i,j,k-2)*met(2,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(3,i+2,j,k-2)-u(3,i-2,j,k-2)) +
c1*(u(3,i+1,j,k-2)-u(3,i-1,j,k-2)) )*strx(i)*istry )
) + c1*(
(la(i,j,k+1))*met(4,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(1,i+2,j,k+1)-u(1,i-2,j,k+1)) +
c1*(u(1,i+1,j,k+1)-u(1,i-1,j,k+1)) )*istry
+ mu(i,j,k+1)*met(2,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(3,i+2,j,k+1)-u(3,i-2,j,k+1)) +
c1*(u(3,i+1,j,k+1)-u(3,i-1,j,k+1)) )*strx(i)*istry
- (la(i,j,k-1)*met(4,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(1,i+2,j,k-1)-u(1,i-2,j,k-1)) +
c1*(u(1,i+1,j,k-1)-u(1,i-1,j,k-1)) )*istry
+ mu(i,j,k-1)*met(2,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(3,i+2,j,k-1)-u(3,i-2,j,k-1)) +
c1*(u(3,i+1,j,k-1)-u(3,i-1,j,k-1)) )*strx(i)*istry ) )
// rp derivatives
// 79 ops, tot=1957
// r1 +=
+ istry*(c2*(
(mu(i+2,j,k))*met(4,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(1,i+2,j,k+2)-u(1,i+2,j,k-2)) +
c1*(u(1,i+2,j,k+1)-u(1,i+2,j,k-1)) )
+ mu(i+2,j,k)*met(2,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(3,i+2,j,k+2)-u(3,i+2,j,k-2)) +
c1*(u(3,i+2,j,k+1)-u(3,i+2,j,k-1)) )*strx(i+2)
- (mu(i-2,j,k)*met(4,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(1,i-2,j,k+2)-u(1,i-2,j,k-2)) +
c1*(u(1,i-2,j,k+1)-u(1,i-2,j,k-1)) )
+ mu(i-2,j,k)*met(2,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(3,i-2,j,k+2)-u(3,i-2,j,k-2)) +
c1*(u(3,i-2,j,k+1)-u(3,i-2,j,k-1)) )*strx(i-2) )
) + c1*(
(mu(i+1,j,k))*met(4,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(1,i+1,j,k+2)-u(1,i+1,j,k-2)) +
c1*(u(1,i+1,j,k+1)-u(1,i+1,j,k-1)) )
+ mu(i+1,j,k)*met(2,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(3,i+1,j,k+2)-u(3,i+1,j,k-2)) +
c1*(u(3,i+1,j,k+1)-u(3,i+1,j,k-1)) )*strx(i+1)
- (mu(i-1,j,k)*met(4,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(1,i-1,j,k+2)-u(1,i-1,j,k-2)) +
c1*(u(1,i-1,j,k+1)-u(1,i-1,j,k-1)) )
+ mu(i-1,j,k)*met(2,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(3,i-1,j,k+2)-u(3,i-1,j,k-2)) +
c1*(u(3,i-1,j,k+1)-u(3,i-1,j,k-1)) )*strx(i-1) ) ) )
// qr derivatives
// 86 ops, tot=2043
// r1 +=
+ c2*(
mu(i,j,k+2)*met(3,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(3,i,j+2,k+2)-u(3,i,j-2,k+2)) +
c1*(u(3,i,j+1,k+2)-u(3,i,j-1,k+2)) )*stry(j)*istrx
+ la(i,j,k+2)*met(4,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(2,i,j+2,k+2)-u(2,i,j-2,k+2)) +
c1*(u(2,i,j+1,k+2)-u(2,i,j-1,k+2)) )*istrx
- ( mu(i,j,k-2)*met(3,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(3,i,j+2,k-2)-u(3,i,j-2,k-2)) +
c1*(u(3,i,j+1,k-2)-u(3,i,j-1,k-2)) )*stry(j)*istrx
+ la(i,j,k-2)*met(4,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(2,i,j+2,k-2)-u(2,i,j-2,k-2)) +
c1*(u(2,i,j+1,k-2)-u(2,i,j-1,k-2)) )*istrx )
) + c1*(
mu(i,j,k+1)*met(3,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(3,i,j+2,k+1)-u(3,i,j-2,k+1)) +
c1*(u(3,i,j+1,k+1)-u(3,i,j-1,k+1)) )*stry(j)*istrx
+ la(i,j,k+1)*met(4,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(2,i,j+2,k+1)-u(2,i,j-2,k+1)) +
c1*(u(2,i,j+1,k+1)-u(2,i,j-1,k+1)) )*istrx
- ( mu(i,j,k-1)*met(3,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(3,i,j+2,k-1)-u(3,i,j-2,k-1)) +
c1*(u(3,i,j+1,k-1)-u(3,i,j-1,k-1)) )*stry(j)*istrx
+ la(i,j,k-1)*met(4,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(2,i,j+2,k-1)-u(2,i,j-2,k-1)) +
c1*(u(2,i,j+1,k-1)-u(2,i,j-1,k-1)) )*istrx ) )
// rq derivatives
// 79 ops, tot=2122
// r1 +=
+ istrx*(c2*(
mu(i,j+2,k)*met(3,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(3,i,j+2,k+2)-u(3,i,j+2,k-2)) +
c1*(u(3,i,j+2,k+1)-u(3,i,j+2,k-1)) )*stry(j+2)
+ mu(i,j+2,k)*met(4,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(2,i,j+2,k+2)-u(2,i,j+2,k-2)) +
c1*(u(2,i,j+2,k+1)-u(2,i,j+2,k-1)) )
- ( mu(i,j-2,k)*met(3,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(3,i,j-2,k+2)-u(3,i,j-2,k-2)) +
c1*(u(3,i,j-2,k+1)-u(3,i,j-2,k-1)) )*stry(j-2)
+ mu(i,j-2,k)*met(4,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(2,i,j-2,k+2)-u(2,i,j-2,k-2)) +
c1*(u(2,i,j-2,k+1)-u(2,i,j-2,k-1)) ) )
) + c1*(
mu(i,j+1,k)*met(3,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(3,i,j+1,k+2)-u(3,i,j+1,k-2)) +
c1*(u(3,i,j+1,k+1)-u(3,i,j+1,k-1)) )*stry(j+1)
+ mu(i,j+1,k)*met(4,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(2,i,j+1,k+2)-u(2,i,j+1,k-2)) +
c1*(u(2,i,j+1,k+1)-u(2,i,j+1,k-1)) )
- ( mu(i,j-1,k)*met(3,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(3,i,j-1,k+2)-u(3,i,j-1,k-2)) +
c1*(u(3,i,j-1,k+1)-u(3,i,j-1,k-1)) )*stry(j-1)
+ mu(i,j-1,k)*met(4,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(2,i,j-1,k+2)-u(2,i,j-1,k-2)) +
c1*(u(2,i,j-1,k+1)-u(2,i,j-1,k-1)) ) ) ) );
// 4 ops, tot=2126
lu(3,i,j,k) = a1*lu(3,i,j,k) + r3*ijac;
}
}
#undef mu
#undef la
#undef jac
#undef u
#undef lu
#undef met
#undef strx
#undef stry
#undef acof
#undef bope
#undef ghcof
}
|
rumi6r.c | /*
* Date: 11 December 2015
* Contact: Thomas Peyrin - thomas.peyrin@gmail.com
*/
/*
* Simulation of boomerang analysis for Skinny
* Date: March 21, 2020
* Author: Hosein Hadipour
* Contact: hsn.hadipour@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdbool.h>
// #define DEBUG 1
#define Nthreads 4 // Number of parallel threads utilized in this program
#define NumOfExperiments 128 // Number of independent experiments
// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64: 32 rounds
//Skinny-64-128: 36 rounds
//Skinny-64-192: 40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};
// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
//12 13 14 15
// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};
// 8-bit Sbox
const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};
const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};
// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};
// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};
// round constants
const unsigned char RC[62] = {
0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F,
0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B,
0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E,
0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A,
0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13,
0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28,
0x10, 0x20};
FILE *fic;
void init_prng(int offset) {
    /* Seed the C library PRNG once per experiment/thread.
     * The seed mixes the wall clock with the caller-supplied offset so
     * that concurrently started experiments get distinct streams. */
    unsigned int seed = (unsigned int)(10 * time(NULL)) + 11u * (unsigned int)offset;
    srand(seed);
    printf("[+] PRNG initialized to 0x%08X\n", seed);
}
void display_matrix(unsigned char state[4][4], int ver)
{
    /* Serialize a 4x4 cipher state to the log file `fic` as hex.
     * 64-bit versions pack two 4-bit cells per output byte; 128-bit
     * versions emit one byte per cell. Other block sizes print nothing. */
    unsigned char bytes[16];
    int count;
    if (versions[ver][0] == 64)
    {
        count = 8;
        for (int c = 0; c < 8; c++)
        {
            unsigned char hi = state[(2 * c) >> 2][(2 * c) & 0x3] & 0xF;
            unsigned char lo = state[(2 * c + 1) >> 2][(2 * c + 1) & 0x3] & 0xF;
            bytes[c] = (unsigned char)((hi << 4) | lo);
        }
    }
    else if (versions[ver][0] == 128)
    {
        count = 16;
        for (int c = 0; c < 16; c++)
            bytes[c] = state[c >> 2][c & 0x3] & 0xFF;
    }
    else
    {
        return; /* unknown block size: same as original (no output) */
    }
    for (int c = 0; c < count; c++)
        fprintf(fic, "%02x", bytes[c]);
}
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    /* Log the internal state followed by each active tweakey word.
     * The number of tweakey words is key size / block size (1, 2 or 3). */
    int words = (int)(versions[ver][1] / versions[ver][0]);
    fprintf(fic, "S = ");
    display_matrix(state, ver);
    for (int k = 0; k < words; k++)
    {
        fprintf(fic, " - TK%i = ", k + 1);
        display_matrix(keyCells[k], ver);
    }
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
// Forward round-tweakey step:
// 1) XOR the subtweakey (XOR of the top two rows of all active TK words) into the state,
// 2) permute every tweakey word with TWEAKEY_P,
// 3) clock the per-word LFSRs on the (new) top two rows of TK2/TK3.
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int i, j, k;
unsigned char pos;
unsigned char keyCells_tmp[3][4][4];
// apply the subtweakey to the internal state
// Only rows 0-1 are keyed in Skinny; the number of active TK words is
// key size / block size (versions[ver][1] / versions[ver][0]).
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] ^= keyCells[0][i][j];
if (2 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j];
else if (3 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
}
}
// update the subtweakey states with the permutation
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the TWEAKEY permutation
pos = TWEAKEY_P[j + 4 * i];
keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
}
}
}
// update the subtweakey states with the LFSRs
// TK1 (k == 0) is never clocked; TK2 uses a shift-left LFSR, TK3 a
// shift-right LFSR, with 4-bit or 8-bit feedback taps per cell size.
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
//application of LFSRs for TK updates
if (k == 1)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
}
else if (k == 2)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
}
}
}
}
// commit the updated tweakey schedule back into keyCells
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
keyCells[k][i][j] = keyCells_tmp[k][i][j];
}
}
}
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function)
// Inverse round-tweakey step: undoes AddKey by running its operations in
// reverse order — 1) inverse-permute the tweakey words, 2) apply the inverse
// LFSRs, 3) XOR the (now previous-round) subtweakey into the state.
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int i, j, k;
unsigned char pos;
unsigned char keyCells_tmp[3][4][4];
// update the subtweakey states with the permutation
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the inverse TWEAKEY permutation
pos = TWEAKEY_P_inv[j + 4 * i];
keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
}
}
}
// update the subtweakey states with the LFSRs
// Rows 2-3 are clocked here: TWEAKEY_P_inv maps positions 0-7 to 8-15,
// so the cells that were LFSR-clocked in the forward schedule now sit in
// the bottom two rows. Each branch is the bitwise inverse of AddKey's.
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 2; i <= 3; i++)
{
for (j = 0; j < 4; j++)
{
//application of inverse LFSRs for TK updates
if (k == 1)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
}
else if (k == 2)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
}
}
}
}
// commit the rewound tweakey schedule back into keyCells
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
keyCells[k][i][j] = keyCells_tmp[k][i][j];
}
}
}
// apply the subtweakey to the internal state
// (XOR is its own inverse, so this mirrors AddKey's first step.)
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] ^= keyCells[0][i][j];
if (2 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j];
else if (3 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
}
}
}
// Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state
void AddConstants(unsigned char state[4][4], int r)
{
state[0][0] ^= (RC[r] & 0xf);
state[1][0] ^= ((RC[r] >> 4) & 0x3);
state[2][0] ^= 0x2;
}
// apply the 4-bit Sbox
void SubCell4(unsigned char state[4][4])
{
    /* Substitute every cell through the 4-bit Skinny S-box, in place. */
    for (int idx = 0; idx < 16; idx++)
        state[idx >> 2][idx & 0x3] = sbox_4[state[idx >> 2][idx & 0x3]];
}
// apply the 4-bit inverse Sbox
void SubCell4_inv(unsigned char state[4][4])
{
    /* Substitute every cell through the inverse 4-bit S-box, in place. */
    for (int idx = 0; idx < 16; idx++)
        state[idx >> 2][idx & 0x3] = sbox_4_inv[state[idx >> 2][idx & 0x3]];
}
// apply the 8-bit Sbox
void SubCell8(unsigned char state[4][4])
{
    /* Substitute every cell through the 8-bit Skinny S-box, in place. */
    for (int idx = 0; idx < 16; idx++)
        state[idx >> 2][idx & 0x3] = sbox_8[state[idx >> 2][idx & 0x3]];
}
// apply the 8-bit inverse Sbox
void SubCell8_inv(unsigned char state[4][4])
{
    /* Substitute every cell through the inverse 8-bit S-box, in place. */
    for (int idx = 0; idx < 16; idx++)
        state[idx >> 2][idx & 0x3] = sbox_8_inv[state[idx >> 2][idx & 0x3]];
}
// Apply the ShiftRows function
void ShiftRows(unsigned char state[4][4])
{
    /* Permute the 16 cells with the Skinny row-rotation table P.
     * Work from a snapshot so writes never disturb pending reads;
     * destination cell idx receives source cell P[idx]. */
    unsigned char snapshot[4][4];
    memcpy(snapshot, state, sizeof snapshot);
    for (int idx = 0; idx < 16; idx++)
    {
        int src = P[idx];
        state[idx >> 2][idx & 0x3] = snapshot[src >> 2][src & 0x3];
    }
}
// Apply the inverse ShiftRows function
void ShiftRows_inv(unsigned char state[4][4])
{
    /* Undo ShiftRows using the inverse permutation table P_inv.
     * Destination cell idx receives source cell P_inv[idx], read
     * from a snapshot so the in-place update is safe. */
    unsigned char snapshot[4][4];
    memcpy(snapshot, state, sizeof snapshot);
    for (int idx = 0; idx < 16; idx++)
    {
        int src = P_inv[idx];
        state[idx >> 2][idx & 0x3] = snapshot[src >> 2][src & 0x3];
    }
}
// Apply the linear diffusion matrix
//M =
//1 0 1 1
//1 0 0 0
//0 1 1 0
//1 0 1 0
void MixColumn(unsigned char state[4][4])
{
    /* Multiply each column by the Skinny binary diffusion matrix
     *   [1 0 1 1]
     *   [1 0 0 0]
     *   [0 1 1 0]
     *   [1 0 1 0]
     * i.e. per column, (a,b,c,d) -> (a^c^d, a, b^c, a^c). */
    for (int col = 0; col < 4; col++)
    {
        unsigned char a = state[0][col];
        unsigned char b = state[1][col];
        unsigned char c = state[2][col];
        unsigned char d = state[3][col];
        state[0][col] = (unsigned char)(a ^ c ^ d);
        state[1][col] = a;
        state[2][col] = (unsigned char)(b ^ c);
        state[3][col] = (unsigned char)(a ^ c);
    }
}
// Apply the inverse linear diffusion matrix (the inverse of M above)
void MixColumn_inv(unsigned char state[4][4])
{
    int col;
    for (col = 0; col < 4; col++)
    {
        // snapshot the column, then write each output row of M^-1 directly
        unsigned char a = state[0][col];
        unsigned char b = state[1][col];
        unsigned char c = state[2][col];
        unsigned char d = state[3][col];
        state[0][col] = b;
        state[1][col] = b ^ c ^ d;
        state[2][col] = b ^ d;
        state[3][col] = a ^ d;
    }
}
// decryption function of Skinny
//
// input:   ciphertext bytes, overwritten in place with the plaintext
// userkey: tweakey material; its size follows versions[ver][1]
// ver:     index into the global versions[] table (versions[ver][0] = block
//          size in bits, versions[ver][1] = tweakey size in bits)
// r:       number of rounds to undo
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char dummy[4][4] = {{0}};
    unsigned char keyCells[3][4][4];
    int i;
    memset(keyCells, 0, 48);
    // Unpack input and tweakey into 4x4 cell arrays: 64-bit versions store one
    // nibble per cell (high nibble of each byte first), 128-bit versions one
    // byte per cell.
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }
    // Fast-forward the tweakey schedule to the last round using a throwaway
    // state. NOTE(review): this assumes AddKey advances keyCells in place as a
    // side effect — confirm against AddKey's definition.
    for (i = r - 1; i >= 0; i--)
    {
        AddKey(dummy, keyCells, ver);
    }
#ifdef DEBUG
    fprintf(fic, "DEC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // Undo the rounds in reverse order, inverting each step of the round
    // function. AddConstants is its own inverse (pure XOR), so the forward
    // routine is reused here.
    for (i = r - 1; i >= 0; i--)
    {
        MixColumn_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        if (versions[ver][0] == 64)
            SubCell4_inv(state);
        else
            SubCell8_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    }
#ifdef DEBUG
    fprintf(fic, "DEC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // Repack the cells into bytes: two nibbles per byte for 64-bit versions,
    // one cell per byte for 128-bit versions.
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}
// encryption function of Skinny
//
// input:   plaintext bytes, overwritten in place with the ciphertext
// userkey: tweakey material; its size follows versions[ver][1]
// ver:     index into the global versions[] table (versions[ver][0] = block
//          size in bits, versions[ver][1] = tweakey size in bits)
// r:       number of rounds to apply
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char keyCells[3][4][4];
    int i;
    memset(keyCells, 0, 48);
    // Unpack input and tweakey into 4x4 cell arrays: 64-bit versions store one
    // nibble per cell (high nibble of each byte first), 128-bit versions one
    // byte per cell.
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }
#ifdef DEBUG
    fprintf(fic, "ENC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // One round = SubCell, AddConstants, AddKey, ShiftRows, MixColumn.
    for (i = 0; i < r; i++)
    {
        if (versions[ver][0] == 64)
            SubCell4(state);
        else
            SubCell8(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        MixColumn(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    } //The last subtweakey should not be added
#ifdef DEBUG
    fprintf(fic, "ENC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // Repack the cells into bytes: two nibbles per byte for 64-bit versions,
    // one cell per byte for 128-bit versions.
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}
// generate test vectors for all the versions of Skinny
// Writes random (TK, P, C, P') tuples to the global file handle 'fic', which
// must already be open.  P' is dec(enc(P)) and should equal P.
// NOTE(review): the loop runs n = 1..9, i.e. nine tuples, not ten — confirm
// whether "n <= 10" / "n < 10 starting at 0" was intended.
void TestVectors(int ver)
{
    unsigned char p[16];
    unsigned char c[16];
    unsigned char k[48];
    int n;
    for (n = 1; n < 10; n++)
    {
        int i;
        // random plaintext: kept in p, encrypted in place in c
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c[i] = p[i] = rand() & 0xff;
        // NOTE(review): this echo of the plaintext to stdout (the rest of the
        // output goes to 'fic') looks like leftover debugging — confirm.
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            printf("%02x", p[i]);
        printf("\n");
        // random tweakey, sized by the version's tweakey length
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            k[i] = rand() & 0xff;
        fprintf(fic, "TK = ");
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            fprintf(fic, "%02x", k[i]);
        fprintf(fic, "\n");
        fprintf(fic, "P = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", p[i]);
        fprintf(fic, "\n");
        // NOTE(review): enc/dec are called with a fixed 10 rounds rather than
        // the version's nominal round count — verify this is the intent.
        enc(c, k, ver, 10);
        fprintf(fic, "C = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n");
        dec(c, k, ver, 10);
        fprintf(fic, "P' = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n\n");
    }
}
// Run N3 boomerang queries under a random key quartet derived from the
// tweakey differences dk1/dk2, counting the quartets that "return":
// dec(enc(p1,k1)^dc, k3) ^ dec(enc(p1^dp,k2)^dc, k4) == dp.
// Returns the number of returned boomerangs out of N3 trials.
int boomerang(int r, int ver, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    unsigned char p1[16], p2[16];
    unsigned char c3[16], c4[16];
    unsigned char k1[48], k2[48], k3[48], k4[48];
    int i;
    int hits = 0;
    int state_bytes = versions[ver][0] >> 3; // block size in bytes
    int key_bytes = versions[ver][1] >> 3;   // tweakey size in bytes
    // k1 is random; the other three keys of the quartet follow from dk1/dk2
    for (i = 0; i < key_bytes; i++)
    {
        k1[i] = rand() & 0xff;
        k2[i] = k1[i] ^ dk1[i];
        k3[i] = k1[i] ^ dk2[i];
        k4[i] = k2[i] ^ dk2[i];
    }
    for (int trial = 0; trial < N3; trial++)
    {
        // random p1; p2 differs from it by the plaintext difference dp
        for (i = 0; i < state_bytes; i++)
        {
            p1[i] = rand() & 0xff;
            p2[i] = p1[i] ^ dp[i];
        }
        enc(p1, k1, ver, r);
        enc(p2, k2, ver, r);
        // shift both ciphertexts by the ciphertext difference dc
        for (i = 0; i < state_bytes; i++)
        {
            c3[i] = p1[i] ^ dc[i];
            c4[i] = p2[i] ^ dc[i];
        }
        dec(c3, k3, ver, r);
        dec(c4, k4, ver, r);
        // the boomerang returns iff the decrypted pair differs by dp again
        int returned = 1;
        for (i = 0; i < state_bytes; i++)
        {
            if ((c3[i] ^ c4[i]) != dp[i])
            {
                returned = 0;
                break;
            }
        }
        hits += returned;
    }
    return hits;
}
// Launch N1 parallel batches of boomerang experiments and report timing and
// success statistics.
//
// R:        number of rounds
// ver:      version index into versions[]
// N1/N2/N3: parallel threads, bunches per thread, queries per bunch
// dp/dc:    plaintext / ciphertext differences
// dk1/dk2:  tweakey differences of the upper / lower trail
// Returns the total number of returned boomerangs over all N1*N2*N3 queries.
double send_boomerangs(int R, int ver, int N1, int N2, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    // Per-iteration hit counters, indexed by the loop counter (see below).
    int NUM[N1];
    int counter;
    // Compute the query count in double from the start: N2 alone can be 2^22,
    // so the product N1 * N2 * N3 can overflow a signed int.
    double total_queries = (double)N1 * (double)N2 * (double)N3;
    printf("#Rounds: %d rounds\n", R);
    printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n", N1, N2, N3, log(total_queries) / log(2));
    clock_t clock_timer;
    double wall_timer;
    clock_timer = clock();
    wall_timer = omp_get_wtime();
    omp_set_num_threads(N1);
#pragma omp parallel for
    for (counter = 0; counter < N1; counter++)
    {
        int num = 0;
        int ID = omp_get_thread_num();
        init_prng(ID); // give each thread its own PRNG stream
        for (int j = 0; j < N2; j++)
        {
            num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
        }
        // Index by the loop counter, not the thread id: OpenMP does not
        // guarantee a 1:1 iteration-to-thread mapping, so NUM[ID] could be
        // overwritten by one thread while other entries stay uninitialized,
        // corrupting the sum below.
        NUM[counter] = num;
    }
    printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
    printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
    double sum = 0;
    for (int i = 0; i < N1; i++)
        sum += NUM[i];
    printf("sum = %f\n", sum);
    // Estimated probability: total queries / #returned boomerangs.
    printf("2^(-%f)\n\n", log(total_queries / sum) / log(2));
    printf("##########################\n");
    return sum;
}
// Parse a hex string into a state-difference byte array: two hex digits per
// byte, (versions[ver][0] / 8) bytes in total.
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
    for (int i = 0; i < (versions[ver][0] >> 3); i++)
    {
        // strtol requires a NUL-terminated string: the previous 2-char buffer
        // was not terminated, so strtol read past the array (undefined
        // behavior). Use a 3-char buffer with an explicit terminator.
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
// Parse a hex string into a tweakey-difference byte array: two hex digits per
// byte, (versions[ver][1] / 8) bytes in total.
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
    {
        // strtol requires a NUL-terminated string: the previous 2-char buffer
        // was not terminated, so strtol read past the array (undefined
        // behavior). Use a 3-char buffer with an explicit terminator.
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
// Driver: configure the boomerang distinguisher (round count, version,
// differences, query budget) and run NumOfExperiments independent
// experiments, printing each probability and the overall average.
int main()
{
    // srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand();
    // init_prng(1);
    // //test all versions of Skinny
    // for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++)
    // {
    //     sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]);
    //     fic = fopen(name, "w");
    //     fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]);
    //     TestVectors(i);
    //     fclose(fic);
    //     printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]);
    // }
    unsigned char dp[16];
    unsigned char dc[16];
    unsigned char dk1[48];
    unsigned char dk2[48];
    // #######################################################################################################
    // #######################################################################################################
    // ############################## User must change only the following lines ##############################
    int R = 6;   // Number of rounds
    int ver = 5; // Determine the version:
    // [0 = Skinny-64-64]
    // [1 = Skinny-64-128]
    // [2 = Skinny-64-192]
    // [3 = Skinny-128-128]
    // [4 = Skinny-128-256]
    // [5 = Skinny-128-384]
    char dp_str[] = "00000000000000000000004000000000";
    char dc_str[] = "00000000000000000000000000000000";
    char dk1_str[] = "00000000000000000000000000002a00000000000000000000000000000099000000000000000000000000000000f300";
    char dk2_str[] = "000000000000000000000054000000000000000000000000000000f30000000000000000000000000000007f00000000";
    // #######################################################################################################
    // #######################################################################################################
    convert_hexstr_to_statearray(ver, dp_str, dp);
    convert_hexstr_to_statearray(ver, dc_str, dc);
    convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
    convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
    //########################## Number of queries #########################
    int N1 = Nthreads; // Number of parallel threads : N1
    int deg1 = 22;
    int deg2 = 0;
    int N2 = 1 << deg1; // Number of bunches per thread : N2 = 2^(deg)
    int N3 = 1 << deg2; // Number of queries per bunch : N3
    //################### Number of total queries : N1*N2*N3 ###############
    char all_results[NumOfExperiments][20];
    double sum = 0;
    double sum_temp = 0;
    for (int i = 0; i < NumOfExperiments; i++)
    {
        printf("Experiment Number %d:\n", i);
        sum_temp = send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
        sum += sum_temp;
        // Probability of this experiment: total queries / #returned
        // boomerangs. Multiply in double from the start so N1 * N2 * N3
        // cannot overflow a signed int (the old (double)(N1 * N2 * N3)
        // overflowed before the cast).
        sum_temp = (double)N1 * N2 * N3 / sum_temp;
        // snprintf instead of sprintf: bounds the write to the 20-byte slot.
        snprintf(all_results[i], sizeof all_results[i], "2^(-%0.2f), ", log(sum_temp) / log(2));
    }
    printf("A summary of all results:\n");
    for (int i = 0; i < NumOfExperiments; i++)
    {
        printf("%s", all_results[i]);
    }
    printf("\n##########################\nAverage = 2^(-%0.4f)\n",
           (log(NumOfExperiments) + log(N1) + log(N2) + log(N3) - log(sum)) / log(2));
    return 0;
}
|
Example_target.6.c | /*
* @@name: target.6c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.5
*/
#define THRESHOLD1 1000000
#define THRESHOLD2 1000
extern void init(float*, float*, int);
extern void output(float*, int);
/* Element-wise product p[i] = v1[i] * v2[i] over N elements, conditionally
 * offloaded with OpenMP 4.5 combined if-clauses:
 *   - the target region runs on the device only when N > THRESHOLD1
 *     (large enough to amortize the data transfer),
 *   - the loop runs in parallel only when N > THRESHOLD2.
 * v1 and v2 are mapped to the device, p is mapped back. */
void vec_mult(float *p, float *v1, float *v2, int N)
{
   int i;
   init(v1, v2, N);
   #pragma omp target parallel for \
       if(target: N>THRESHOLD1) if(parallel: N>THRESHOLD2) \
       map(to: v1[0:N], v2[:N]) map(from: p[0:N])
   for (i=0; i<N; i++)
     p[i] = v1[i] * v2[i];
   output(p, N);
}
|
Stmt.h | //===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <string>
namespace llvm {
class FoldingSetNodeID;
}
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class IdentifierInfo;
class LabelDecl;
class ParmVarDecl;
class PrinterHelper;
struct PrintingPolicy;
class QualType;
class RecordDecl;
class SourceManager;
class StringLiteral;
class SwitchStmt;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//

/// Stmt - This represents one statement.
///
class LLVM_ALIGNAS(LLVM_PTR_SIZE) Stmt {
public:
  /// One enumerator per concrete statement class, generated from
  /// StmtNodes.inc, plus first/last range markers for each abstract base.
  enum StmtClass {
    NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
  first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
  first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
  };

  // Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
  void *operator new(size_t bytes) LLVM_NOEXCEPT {
    llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
  }
  void operator delete(void *data) LLVM_NOEXCEPT {
    llvm_unreachable("Stmts cannot be released with regular 'delete'.");
  }

  class StmtBitfields {
    friend class Stmt;

    /// \brief The statement class.
    unsigned sClass : 8;
  };
  enum { NumStmtBits = 8 };

  class CompoundStmtBitfields {
    friend class CompoundStmt;
    unsigned : NumStmtBits;

    unsigned NumStmts : 32 - NumStmtBits;
  };

  class ExprBitfields {
    friend class Expr;
    friend class DeclRefExpr; // computeDependence
    friend class InitListExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class ASTStmtReader; // deserialization
    friend class CXXNewExpr; // ctor
    friend class DependentScopeDeclRefExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CallExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ShuffleVectorExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class AtomicExpr; // ctor

    unsigned : NumStmtBits;

    unsigned ValueKind : 2;
    unsigned ObjectKind : 2;
    unsigned TypeDependent : 1;
    unsigned ValueDependent : 1;
    unsigned InstantiationDependent : 1;
    unsigned ContainsUnexpandedParameterPack : 1;
  };
  enum { NumExprBits = 16 };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;
    unsigned : NumExprBits;

    unsigned Kind : 2;
  };

  enum APFloatSemantics {
    IEEEhalf,
    IEEEsingle,
    IEEEdouble,
    x87DoubleExtended,
    IEEEquad,
    PPCDoubleDouble
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;
    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction
    unsigned IsExact : 1;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;
    unsigned : NumExprBits;

    unsigned Kind : 2;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class DeclRefExprBitfields {
    friend class DeclRefExpr;
    friend class ASTStmtReader; // deserialization
    unsigned : NumExprBits;

    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;
  };

  class CastExprBitfields {
    friend class CastExpr;
    unsigned : NumExprBits;

    unsigned Kind : 6;
    unsigned BasePathSize : 32 - 6 - NumExprBits;
  };

  class CallExprBitfields {
    friend class CallExpr;
    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;
  };

  class ExprWithCleanupsBitfields {
    friend class ExprWithCleanups;
    friend class ASTStmtReader; // deserialization

    unsigned : NumExprBits;

    unsigned NumObjects : 32 - NumExprBits;
  };

  class PseudoObjectExprBitfields {
    friend class PseudoObjectExpr;
    friend class ASTStmtReader; // deserialization

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  class ObjCIndirectCopyRestoreExprBitfields {
    friend class ObjCIndirectCopyRestoreExpr;
    unsigned : NumExprBits;

    unsigned ShouldCopy : 1;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class TypeTraitExprBitfields {
    friend class TypeTraitExpr;
    friend class ASTStmtReader;
    friend class ASTStmtWriter;

    unsigned : NumExprBits;

    /// \brief The kind of type trait, which is a value of a TypeTrait enumerator.
    unsigned Kind : 8;

    /// \brief If this expression is not value-dependent, this indicates whether
    /// the trait evaluated true or false.
    unsigned Value : 1;

    /// \brief The number of arguments to this type trait.
    unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
  };

  /// All subclass bitfield storage overlays the same word(s); each *Bitfields
  /// struct above reserves the low NumStmtBits bits so StmtBits.sClass is
  /// always valid regardless of which member is active.
  union {
    StmtBitfields StmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    ExprBitfields ExprBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    CastExprBitfields CastExprBits;
    CallExprBitfields CallExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
    InitListExprBitfields InitListExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
  };

  friend class ASTStmtReader;
  friend class ASTStmtWriter;

public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  void *operator new(size_t bytes, void *mem) LLVM_NOEXCEPT { return mem; }

  void operator delete(void *, const ASTContext &, unsigned) LLVM_NOEXCEPT {}
  void operator delete(void *, const ASTContext *, unsigned) LLVM_NOEXCEPT {}
  void operator delete(void *, size_t) LLVM_NOEXCEPT {}
  void operator delete(void *, void *) LLVM_NOEXCEPT {}

public:
  /// \brief A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell { };

protected:
  /// Iterator for iterating over Stmt * arrays that contain only Expr *
  ///
  /// This is needed because AST nodes use Stmt* arrays to store
  /// references to children (to be compatible with StmtIterator).
  struct ExprIterator
      : llvm::iterator_adaptor_base<ExprIterator, Stmt **,
                                    std::random_access_iterator_tag, Expr *> {
    ExprIterator() : iterator_adaptor_base(nullptr) {}
    ExprIterator(Stmt **I) : iterator_adaptor_base(I) {}

    reference operator*() const {
      assert((*I)->getStmtClass() >= firstExprConstant &&
             (*I)->getStmtClass() <= lastExprConstant);
      return *reinterpret_cast<Expr **>(I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only Expr *
  struct ConstExprIterator
      : llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *,
                                    std::random_access_iterator_tag,
                                    const Expr *const> {
    ConstExprIterator() : iterator_adaptor_base(nullptr) {}
    ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {}

    reference operator*() const {
      assert((*I)->getStmtClass() >= firstExprConstant &&
             (*I)->getStmtClass() <= lastExprConstant);
      return *reinterpret_cast<const Expr *const *>(I);
    }
  };

private:
  /// \brief Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// \brief Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt(StmtClass SC) {
    static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }
  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getLocStart() const LLVM_READONLY;
  SourceLocation getLocEnd() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// \brief Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy,
                   unsigned Indentation = 0) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip past any implicit AST nodes which might surround this
  /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
  Stmt *IgnoreImplicit();

  /// \brief Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpessions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  typedef StmtIterator child_iterator;
  typedef ConstStmtIterator const_child_iterator;

  typedef llvm::iterator_range<child_iterator> child_range;
  typedef llvm::iterator_range<const_child_iterator> const_child_range;

  child_range children();
  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// \brief Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
///
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc,
           SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg),
                                    StartLoc(startLoc), EndLoc(endLoc) {}

  /// \brief Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { }

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const {
    return DG.isSingleDecl();
  }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  SourceLocation getStartLoc() const { return StartLoc; }
  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  /// Iterators over the declarations in the group.
  typedef DeclGroupRef::iterator decl_iterator;
  typedef DeclGroupRef::const_iterator const_decl_iterator;
  typedef llvm::iterator_range<decl_iterator> decl_range;
  typedef llvm::iterator_range<const_decl_iterator> decl_const_range;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }
  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }
  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator;
  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }
  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
  SourceLocation SemiLoc;

  /// \brief True if the null statement was preceded by an empty macro, e.g:
  /// @code
  ///   #define CALL(x)
  ///   CALL(0);
  /// @endcode
  bool HasLeadingEmptyMacro;

public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass), SemiLoc(L),
        HasLeadingEmptyMacro(hasLeadingEmptyMacro) {}

  /// \brief Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty),
                                        HasLeadingEmptyMacro(false) { }

  SourceLocation getSemiLoc() const { return SemiLoc; }
  void setSemiLoc(SourceLocation L) { SemiLoc = L; }

  bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }

  // A null statement occupies exactly the semicolon's location.
  SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  friend class ASTStmtReader;
  friend class ASTStmtWriter;
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
///
class CompoundStmt : public Stmt {
  Stmt** Body;
  SourceLocation LBraceLoc, RBraceLoc;

  friend class ASTStmtReader;

public:
  CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts,
               SourceLocation LB, SourceLocation RB);

  // \brief Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
  }

  // \brief Build an empty compound statement.
  explicit CompoundStmt(EmptyShell Empty)
      : Stmt(CompoundStmtClass, Empty), Body(nullptr) {
    CompoundStmtBits.NumStmts = 0;
  }

  void setStmts(const ASTContext &C, ArrayRef<Stmt *> Stmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  // The statement count lives in the shared bitfield storage, not a member.
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  typedef Stmt** body_iterator;
  typedef llvm::iterator_range<body_iterator> body_range;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return Body; }
  body_iterator body_end() { return Body + size(); }
  Stmt *body_front() { return !body_empty() ? Body[0] : nullptr; }
  Stmt *body_back() { return !body_empty() ? Body[size()-1] : nullptr; }

  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    Body[size()-1] = S;
  }

  typedef Stmt* const * const_body_iterator;
  typedef llvm::iterator_range<const_body_iterator> body_const_range;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }
  const_body_iterator body_begin() const { return Body; }
  const_body_iterator body_end() const { return Body + size(); }
  const Stmt *body_front() const {
    return !body_empty() ? Body[0] : nullptr;
  }
  const Stmt *body_back() const {
    return !body_empty() ? Body[size() - 1] : nullptr;
  }

  typedef std::reverse_iterator<body_iterator> reverse_body_iterator;
  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }
  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  typedef std::reverse_iterator<const_body_iterator>
      const_reverse_body_iterator;
  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }
  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(Body, Body + CompoundStmtBits.NumStmts);
  }

  const_child_range children() const {
    return const_child_range(child_iterator(Body),
                             child_iterator(Body + CompoundStmtBits.NumStmts));
  }
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  // A pointer to the following CaseStmt or DefaultStmt class,
  // used by SwitchStmt.
  SwitchCase *NextSwitchCase;
  SourceLocation KeywordLoc;
  SourceLocation ColonLoc;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {
  }

  SwitchCase(StmtClass SC, EmptyShell)
      : Stmt(SC), NextSwitchCase(nullptr) {}

public:
  // Intrusive singly-linked list of the cases of the enclosing switch.
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase*>(this)->getSubStmt();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};
/// CaseStmt - A 'case' label (optionally a GNU case range) inside a switch.
class CaseStmt : public SwitchCase {
SourceLocation EllipsisLoc;
enum { LHS, RHS, SUBSTMT, END_EXPR };
Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for
// GNU "case 1 ... 4" extension
public:
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// The sub-statement is attached later via setSubStmt().
SubExprs[SUBSTMT] = nullptr;
SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
EllipsisLoc = ellipsisLoc;
}
/// \brief Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { }
SourceLocation getCaseLoc() const { return KeywordLoc; }
void setCaseLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
// Case value(s); stored as Stmt* and viewed as Expr* here.
Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }
const Expr *getLHS() const {
return reinterpret_cast<const Expr*>(SubExprs[LHS]);
}
const Expr *getRHS() const {
return reinterpret_cast<const Expr*>(SubExprs[RHS]);
}
const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }
void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }
SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
// NOTE(review): assumes getSubStmt() is non-null by the time this is
// called (it is nullptr right after construction) — confirm with callers.
const CaseStmt *CS = this;
while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
}
};
/// DefaultStmt - Represents the 'default:' label inside a switch body.
class DefaultStmt : public SwitchCase {
  Stmt *SubStmt; // The statement this label controls.

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// \brief Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  // Sub-statement access.
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  // Keyword/colon locations are stored in the SwitchCase base.
  SourceLocation getDefaultLoc() const { return KeywordLoc; }
  void setDefaultLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubStmt->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
};
// Dispatch to the concrete subclass: every SwitchCase is either a CaseStmt
// or a DefaultStmt, and each supplies its own getLocEnd().
inline SourceLocation SwitchCase::getLocEnd() const {
  const CaseStmt *AsCase = dyn_cast<CaseStmt>(this);
  return AsCase ? AsCase->getLocEnd()
                : cast<DefaultStmt>(this)->getLocEnd();
}
/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
///
class LabelStmt : public Stmt {
  SourceLocation IdentLoc; // Location of the label identifier.
  LabelDecl *TheDecl;      // Declaration this label refers to.
  Stmt *SubStmt;           // The labeled statement.

public:
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) {
    // Keep the node compact: two locations plus two pointers.
    static_assert(sizeof(LabelStmt) ==
                      2 * sizeof(SourceLocation) + 2 * sizeof(void *),
                  "LabelStmt too big");
  }

  // \brief Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) {}

  SourceLocation getIdentLoc() const { return IdentLoc; }
  void setIdentLoc(SourceLocation L) { IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }
  const char *getName() const;

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubStmt->getLocEnd();
  }

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};
/// \brief Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
///
class AttributedStmt : public Stmt {
Stmt *SubStmt;
SourceLocation AttrLoc;
// Number of Attr* entries stored in the trailing array (see
// getAttrArrayPtr(): the attributes live immediately after this object).
unsigned NumAttrs;
friend class ASTStmtReader;
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt)
: Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
NumAttrs(Attrs.size()) {
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
// The attribute array is allocated in the same block, directly after
// the AttributedStmt object itself.
const Attr *const *getAttrArrayPtr() const {
return reinterpret_cast<const Attr *const *>(this + 1);
}
const Attr **getAttrArrayPtr() {
return reinterpret_cast<const Attr **>(this + 1);
}
public:
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr*> Attrs, Stmt *SubStmt);
// \brief Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttrLoc; }
ArrayRef<const Attr*> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
///
class IfStmt : public Stmt {
// Sub-statement slots: condition variable DeclStmt, condition expression,
// then-branch, optional else-branch.
enum { VAR, COND, THEN, ELSE, END_EXPR };
Stmt* SubExprs[END_EXPR];
SourceLocation IfLoc;
SourceLocation ElseLoc;
public:
IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
Stmt *then, SourceLocation EL = SourceLocation(),
Stmt *elsev = nullptr);
/// \brief Build an empty if/then/else statement
explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { }
/// \brief Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
///   printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
const Stmt *getThen() const { return SubExprs[THEN]; }
void setThen(Stmt *S) { SubExprs[THEN] = S; }
const Stmt *getElse() const { return SubExprs[ELSE]; }
void setElse(Stmt *S) { SubExprs[ELSE] = S; }
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Stmt *getThen() { return SubExprs[THEN]; }
Stmt *getElse() { return SubExprs[ELSE]; }
SourceLocation getIfLoc() const { return IfLoc; }
void setIfLoc(SourceLocation L) { IfLoc = L; }
SourceLocation getElseLoc() const { return ElseLoc; }
void setElseLoc(SourceLocation L) { ElseLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }
// The statement ends at the else-branch when present, else the then-branch.
SourceLocation getLocEnd() const LLVM_READONLY {
if (SubExprs[ELSE])
return SubExprs[ELSE]->getLocEnd();
else
return SubExprs[THEN]->getLocEnd();
}
// Iterators over subexpressions.  The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
///
class SwitchStmt : public Stmt {
SourceLocation SwitchLoc;
// Sub-statement slots: condition variable DeclStmt, condition, body.
enum { VAR, COND, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR];
// This points to a linked list of case and default statements and, if the
// SwitchStmt is a switch on an enum value, records whether all the enum
// values were covered by CaseStmts.  The coverage information value is meant
// to be a hint for possible clients.
llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase;
public:
SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond);
/// \brief Build a empty switch statement.
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { }
/// \brief Retrieve the variable declared in this "switch" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// switch (int x = foo()) {
/// case 0: break;
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Stmt *getBody() const { return SubExprs[BODY]; }
const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); }
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); }
/// \brief Set the case list for this switch statement.
void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); }
SourceLocation getSwitchLoc() const { return SwitchLoc; }
void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }
void setBody(Stmt *S, SourceLocation SL) {
SubExprs[BODY] = S;
SwitchLoc = SL;
}
// Prepend a case/default label to the intrusive case list.
void addSwitchCase(SwitchCase *SC) {
assert(!SC->getNextSwitchCase()
&& "case/default already added to a switch");
SC->setNextSwitchCase(FirstCase.getPointer());
FirstCase.setPointer(SC);
}
/// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
/// switch over an enum value then all cases have been explicitly covered.
void setAllEnumCasesCovered() { FirstCase.setInt(true); }
/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const { return FirstCase.getInt(); }
SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; }
// Fall back to the condition's end when the body is absent.
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd() : SubExprs[COND]->getLocEnd();
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
};
/// WhileStmt - This represents a 'while' stmt.
///
class WhileStmt : public Stmt {
SourceLocation WhileLoc;
// Sub-statement slots: condition variable DeclStmt, condition, body.
enum { VAR, COND, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR];
public:
WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
SourceLocation WL);
/// \brief Build an empty while statement.
explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { }
/// \brief Retrieve the variable declared in this "while" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// while (int x = random()) {
///   // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this WhileStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; }
// NOTE(review): assumes the body is non-null — confirm with construction
// sites before relying on this for partially built nodes.
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY]->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == WhileStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// DoStmt - This represents a 'do/while' stmt.
///
class DoStmt : public Stmt {
  SourceLocation DoLoc;
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
    SubExprs[BODY] = body;
    SubExprs[COND] = reinterpret_cast<Stmt *>(cond);
  }

  /// \brief Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  // Condition expression (stored as a Stmt*, exposed as an Expr*).
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }

  // Loop body.
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  // Keyword and punctuation locations.
  SourceLocation getDoLoc() const { return DoLoc; }
  void setDoLoc(SourceLocation L) { DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  // Spans from 'do' through the ')' that closes the condition.
  SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
///
class ForStmt : public Stmt {
SourceLocation ForLoc;
// Sub-statement slots: init, condition variable DeclStmt, condition,
// increment, body.
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;
public:
ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP);
/// \brief Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { }
Stmt *getInit() { return SubExprs[INIT]; }
/// \brief Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
/// \code
/// for (int x = random(); int y = mangle(x); ++x) {
///   // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getForLoc() const { return ForLoc; }
void setForLoc(SourceLocation L) { ForLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; }
// NOTE(review): assumes a non-null body — confirm for partially built nodes.
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY]->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// GotoStmt - This represents a direct goto.
///
class GotoStmt : public Stmt {
  LabelDecl *Label;        // The label being jumped to.
  SourceLocation GotoLoc;  // Location of the 'goto' keyword.
  SourceLocation LabelLoc; // Location of the label name.

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}

  /// \brief Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  // Spans from the 'goto' keyword through the label name.
  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
///
class IndirectGotoStmt : public Stmt {
SourceLocation GotoLoc;
SourceLocation StarLoc;
// The target address expression, stored as a Stmt*.
Stmt *Target;
public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
Expr *target)
: Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
Target((Stmt*)target) {}
/// \brief Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
: Stmt(IndirectGotoStmtClass, Empty) { }
void setGotoLoc(SourceLocation L) { GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }
/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
}
SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
// Iterators
child_range children() { return child_range(&Target, &Target+1); }
};
/// ContinueStmt - This represents a continue.
///
class ContinueStmt : public Stmt {
  SourceLocation ContinueLoc; // Location of the 'continue' keyword.

public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}

  /// \brief Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const { return ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueLoc = L; }

  // The whole statement is the single keyword.
  SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: a continue has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// BreakStmt - This represents a break.
///
class BreakStmt : public Stmt {
  SourceLocation BreakLoc; // Location of the 'break' keyword.

public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {
    // Keep the node minimal: the Stmt base plus one location.
    static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation),
                  "BreakStmt too large");
  }

  /// \brief Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  SourceLocation getBreakLoc() const { return BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakLoc = L; }

  // The whole statement is the single keyword.
  SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: a break has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
///
class ReturnStmt : public Stmt {
SourceLocation RetLoc;
// The returned expression, or null for a bare 'return;'.
Stmt *RetExpr;
const VarDecl *NRVOCandidate;
public:
explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {}
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
: Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E),
NRVOCandidate(NRVOCandidate) {}
/// \brief Build an empty return expression.
explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { }
const Expr *getRetValue() const;
Expr *getRetValue();
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }
SourceLocation getReturnLoc() const { return RetLoc; }
void setReturnLoc(SourceLocation L) { RetLoc = L; }
/// \brief Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }
SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; }
// Ends at the return value when present, else at the keyword itself.
SourceLocation getLocEnd() const LLVM_READONLY {
return RetExpr ? RetExpr->getLocEnd() : RetLoc;
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
// Iterators
child_range children() {
if (RetExpr) return child_range(&RetExpr, &RetExpr+1);
return child_range(child_iterator(), child_iterator());
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
///
class AsmStmt : public Stmt {
protected:
SourceLocation AsmLoc;
/// \brief True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// \brief If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
// Operand expressions: outputs first, then inputs (see the iterator
// accessors below, which slice this array by NumOutputs/NumInputs).
Stmt **Exprs;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers) :
Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { }
friend class ASTStmtReader;
public:
/// \brief Build an empty inline-assembly statement.
// NOTE(review): the flag and count members are left uninitialized here;
// presumably ASTStmtReader fills them in — confirm.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) :
Stmt(SC, Empty), Exprs(nullptr) { }
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
// Base class returns invalid locations; subclasses override with real ones.
SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); }
SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand.  All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint.  Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators.
// Inputs occupy Exprs[NumOutputs .. NumOutputs+NumInputs).
typedef ExprIterator inputs_iterator;
typedef ConstExprIterator const_inputs_iterator;
typedef llvm::iterator_range<inputs_iterator> inputs_range;
typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators.
// Outputs occupy Exprs[0 .. NumOutputs).
typedef ExprIterator outputs_iterator;
typedef ConstExprIterator const_outputs_iterator;
typedef llvm::iterator_range<outputs_iterator> outputs_range;
typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
///
class GCCAsmStmt : public AsmStmt {
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints;
StringLiteral **Clobbers;
IdentifierInfo **Names;
friend class ASTStmtReader;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, SourceLocation rparenloc);
/// \brief Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty),
Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {
}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const {
return Str;
}
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces.  If the asm string is erroneous, emit errors and return
/// true, otherwise return false.  This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                          const ASTContext &C, unsigned &DiagOffs) const;

/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;

//===--- Output operands ---===//

/// Symbolic name of output operand \p i, or null if it is unnamed.
IdentifierInfo *getOutputIdentifier(unsigned i) const {
  return Names[i];
}

/// Name text of output operand \p i; empty for unnamed operands.
StringRef getOutputName(unsigned i) const {
  if (IdentifierInfo *II = getOutputIdentifier(i))
    return II->getName();
  return StringRef();
}

StringRef getOutputConstraint(unsigned i) const;

const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
  return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
  return Constraints[i];
}

Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
  return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}

//===--- Input operands ---===//
// Inputs are stored after the NumOutputs output entries in the Names,
// Constraints and Exprs arrays; hence the 'i + NumOutputs' indexing below.

/// Symbolic name of input operand \p i, or null if it is unnamed.
IdentifierInfo *getInputIdentifier(unsigned i) const {
  return Names[i + NumOutputs];
}

/// Name text of input operand \p i; empty for unnamed operands.
StringRef getInputName(unsigned i) const {
  if (IdentifierInfo *II = getInputIdentifier(i))
    return II->getName();
  return StringRef();
}

StringRef getInputConstraint(unsigned i) const;

const StringLiteral *getInputConstraintLiteral(unsigned i) const {
  return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
  return Constraints[i + NumOutputs];
}

Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
  return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
private:
  // Bulk initializer used when deserializing / rebuilding the statement.
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;
  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};
/// This represents a Microsoft inline-assembly statement extension.
///
class MSAsmStmt : public AsmStmt {
  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;        // Final asm string text.

  unsigned NumAsmToks;     // Number of entries in AsmToks.
  Token *AsmToks;          // Raw tokens of the asm body.
  StringRef *Constraints;  // Outputs first, then inputs (see getInputConstraint).
  StringRef *Clobbers;

  friend class ASTStmtReader;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// \brief Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty),
    NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { }

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  /// True if the statement was written in braced form (LBraceLoc is valid).
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//
  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//
  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);
  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);
  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//
  /// All constraints, outputs followed by inputs.
  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }
  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }
  /// All operand expressions, outputs followed by inputs.
  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};
/// Represents a __except block of Microsoft structured exception handling.
class SEHExceptStmt : public Stmt {
  SourceLocation Loc;   // Location of the __except keyword.
  Stmt *Children[2];    // Filter expression and handler block, indexed below.
  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc,
                Expr *FilterExpr,
                Stmt *Block);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { }

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }

  /// The filter expression deciding whether this handler runs.
  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  /// The handler body.
  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};
/// Represents a __finally block of Microsoft structured exception handling.
class SEHFinallyStmt : public Stmt {
  SourceLocation Loc;   // Location of the __finally keyword.
  Stmt *Block;          // The cleanup body.

  SEHFinallyStmt(SourceLocation Loc,
                 Stmt *Block);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { }

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getLocEnd(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};
/// Represents a try/__try statement with an attached __except or __finally
/// handler (Microsoft structured exception handling).
class SEHTryStmt : public Stmt {
  bool IsCXXTry;        // true if spelled 'try', false for '__try'.
  SourceLocation TryLoc;
  Stmt *Children[2];    // Try body and handler, indexed by the enum below.
  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { }

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};
/// Represents a __leave statement.
///
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;  // Location of the __leave keyword.

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// \brief Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { }

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  // The statement is a single keyword, so start and end coincide.
  SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// \brief This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// \brief The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// \brief Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    /// \brief Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    ///
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// \brief Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); }

    /// \brief Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// \brief Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// \brief Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// \brief Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// \brief Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// \brief Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable
    /// (by reference or by copy).
    VarDecl *getCapturedVar() const {
      assert((capturesVariable() || capturesVariableByCopy()) &&
             "No variable available for 'this' or VAT capture");
      return VarAndKind.getPointer();
    }
    friend class ASTStmtReader;
  };

private:
  /// \brief The number of variables captured, including 'this'.
  unsigned NumCaptures;

  /// \brief The pointer part is the outlined function declaration and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind;

  /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl;

  /// \brief Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// \brief Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Capture-initializer expressions followed by the captured statement are
  // stored in trailing storage directly after this object (this + 1).
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// \brief Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// \brief Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); }
  const CapturedDecl *getCapturedDecl() const {
    return CapDeclAndKind.getPointer();
  }

  /// \brief Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D) {
    assert(D && "null CapturedDecl");
    CapDeclAndKind.setPointer(D);
  }

  /// \brief Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const {
    return CapDeclAndKind.getInt();
  }

  /// \brief Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind) {
    CapDeclAndKind.setInt(Kind);
  }

  /// \brief Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// \brief Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// \brief True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// \brief An iterator that walks over the captures.
  typedef Capture *capture_iterator;
  typedef const Capture *const_capture_iterator;
  typedef llvm::iterator_range<capture_iterator> capture_range;
  typedef llvm::iterator_range<const_capture_iterator> capture_const_range;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// \brief Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// \brief Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// \brief Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// \brief Iterator that walks over the capture initialization arguments.
  typedef Expr **capture_init_iterator;
  typedef llvm::iterator_range<capture_init_iterator> capture_init_range;

  /// \brief Const iterator that walks over the capture initialization
  /// arguments.
  typedef Expr *const *const_capture_init_iterator;
  typedef llvm::iterator_range<const_capture_init_iterator>
      const_capture_init_range;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }
  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// \brief Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }
  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// \brief Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }
  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  // The source range of a CapturedStmt is that of the captured statement.
  SourceLocation getLocStart() const LLVM_READONLY {
    return getCapturedStmt()->getLocStart();
  }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return getCapturedStmt()->getLocEnd();
  }
  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();

  friend class ASTStmtReader;
};
} // end namespace clang
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y in place so that 0 <= x->tv_usec - y->tv_usec < 1e6 holds
   * before the final componentwise subtraction (y is modified here, as in
   * the classic glibc manual example). */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec  += borrow;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec  -= carry;
  }

  /* tv_usec of the result is certainly positive after normalization. */
  result->tv_sec  = x->tv_sec  - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Report whether the difference is negative. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 32;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(12*t1+Ny+21,32)),floord(24*t2+Ny+20,32)),floord(24*t1-24*t2+Nz+Ny+19,32));t3++) {
for (t4=max(max(max(0,ceild(3*t1-31,32)),ceild(24*t2-Nz-124,128)),ceild(32*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(12*t1+Nx+21,128)),floord(24*t2+Nx+20,128)),floord(32*t3+Nx+28,128)),floord(24*t1-24*t2+Nz+Nx+19,128));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),32*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),32*t3+30),128*t4+126),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(128*t4,t5+1);
ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
oyranos_cmm_oyra_image_scale.c | /** @file oyranos_cmm_oyra_image.c
*
* Oyranos is an open source Color Management System
*
* @par Copyright:
* 2013-2016 (C) Kai-Uwe Behrmann
*
* @brief modules for Oyranos
* @internal
* @author Kai-Uwe Behrmann <ku.b@gmx.de>
* @par License:
* new BSD <http://www.opensource.org/licenses/BSD-3-Clause>
* @since 2013/06/10
*/
#include "oyCMMapi4_s.h"
#include "oyCMMapi7_s.h"
#include "oyCMMui_s.h"
#include "oyConnectorImaging_s.h"
#include "oyRectangle_s.h"
#include "oyRectangle_s_.h"
#include "oyranos_cmm.h"
#include "oyranos_cmm_oyra.h"
#include "oyranos_helper.h"
#include "oyranos_i18n.h"
#include "oyranos_string.h"
#include <math.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_POSIX
#include <stdint.h> /* UINT32_MAX */
#endif
/* OY_IMAGE_SCALE_REGISTRATION */
/* OY_IMAGE_SCALE_REGISTRATION ----------------------------------------------*/
/** @func oyraFilter_ImageScaleRun
* @brief implement oyCMMFilter_GetNext_f()
*
* @version Oyranos: 0.9.6
* @date 2016/04/04
* @since 2013/06/10 (Oyranos: 0.9.5)
*/
int oyraFilter_ImageScaleRun ( oyFilterPlug_s * requestor_plug,
                               oyPixelAccess_s * ticket )
{
  int result = 0, error = 0;
  oyFilterSocket_s * socket = 0;
  oyFilterNode_s * input_node = 0,
                 * node = 0;
  oyFilterPlug_s * plug = 0;
  oyImage_s * image = 0;
  int image_width;
  int dirty = 0;

  /* The image to be scaled is attached as data on this node's socket. */
  socket = oyFilterPlug_GetSocket( requestor_plug );
  node = oyFilterSocket_GetNode( socket );
  image = (oyImage_s*)oyFilterSocket_GetData( socket );

  /* Without a source image there is nothing to do. */
  if(!image)
  {
    result = 1;
    goto clean_scale1;
  }

  image_width = oyImage_GetWidth(image);

  if(oy_debug)
    oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
              "image [%d](%d)\n",OY_DBG_ARGS_,oyStruct_GetId((oyStruct_s*)image),oyImage_GetWidth(image) );

  {
    /* Stack allocated rectangle; released implicitly with the scope. */
    oyRectangle_s_ ticket_roi_pix_ = {oyOBJECT_RECTANGLE_S,0,0,0, 0,0,0,0};
    oyRectangle_s * ticket_roi_pix = (oyRectangle_s*)&ticket_roi_pix_;
    double scale = 1.0;
    oyOptions_s * node_opts = oyFilterNode_GetOptions( node, 0 );

    if(!node_opts)
      dirty = 1;

    if(dirty)
    {
      result = dirty;
      goto clean_scale2;
    }

    plug = oyFilterNode_GetPlug( node, 0 );

    /* select node */
    input_node = oyFilterNode_GetPlugNode( node, 0 );

    /* find filters own scale factor */
    error = oyOptions_FindDouble( node_opts,
                                  "//" OY_TYPE_STD "/scale/scale",
                                  0, &scale );
    if(error) WARNc2_S("%s %d", _("found issues"),error);

    /* Current ticket region of interest in pixel coordinates. */
    oyPixelAccess_RoiToPixels( ticket, NULL, &ticket_roi_pix );

    if(oy_debug > 2)
      oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                "%s",OY_DBG_ARGS_, oyPixelAccess_Show(ticket));

    /* Only do work for a real, positive scale factor; scale == 1.0 is a
     * pass-through to the input node further below. */
    if(scale != 1.0 && scale > 0)
    {
      oyImage_s * output_image = oyPixelAccess_GetOutputImage( ticket );
      int output_image_width = oyImage_GetWidth( output_image );
      oyRectangle_s_ new_ticket_array_roi_pix_ = {oyOBJECT_RECTANGLE_S,0,0,0, 0,0,0,0};
      oyRectangle_s * new_ticket_array_roi = NULL,
                    * new_ticket_array_roi_pix = (oyRectangle_s*)&new_ticket_array_roi_pix_,
                    * image_pix = oyRectangle_NewWith( 0,0,+
                                      oyImage_GetWidth(image),
                                      oyImage_GetHeight(image), 0);
      oyPixelAccess_s * new_ticket = 0;
      /* start_xy is defined relative to the tickets output image width */
      double start_x_src_pixel = oyPixelAccess_GetStart( ticket, 0 )
                                 * output_image_width,
             /* NOTE(review): the y start is also multiplied by the image
              * *width* — consistent with the comment above, but confirm. */
             start_y_src_pixel = oyPixelAccess_GetStart( ticket, 1 )
                                 * output_image_width,
             start_x_dst_pixel,start_y_dst_pixel;
      int layout_src = oyImage_GetPixelLayout( image, oyLAYOUT ),
          layout_dst = oyImage_GetPixelLayout( output_image, oyLAYOUT );
      int channels_src = oyToChannels_m( layout_src );
      int channels_dst = oyToChannels_m( layout_dst );

      /* avoid division by zero */
      if(!channels_src) channels_src = 1;
      if(!channels_dst) channels_dst = 1;

      /* A derived ticket asks the input node for the unscaled source area. */
      new_ticket = oyPixelAccess_Copy( ticket, ticket->oy_ );
      oyPixelAccess_SetArray( new_ticket, 0, 0 );
      oyPixelAccess_SetOutputImage( new_ticket, image );

      if(oy_debug)
      {
        oyArray2d_s * a_dest = oyPixelAccess_GetArray( ticket );
        int a_width_dest = oyArray2d_GetWidth( a_dest ) / channels_dst;
        oyra_msg( oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                  "output_image [%d](%d*%d)-array[%d](w%d) image [%d](%d*%d)\n",
                  OY_DBG_ARGS_,
                  oyStruct_GetId((oyStruct_s*)output_image),
                  oyImage_GetWidth(output_image),channels_dst,
                  oyStruct_GetId((oyStruct_s*)a_dest), a_width_dest,
                  oyStruct_GetId((oyStruct_s*)image),
                  oyImage_GetWidth(image), channels_src );
        oyArray2d_Release( &a_dest );
      }

      oyPixelAccess_RoiToPixels( ticket, NULL, &new_ticket_array_roi_pix );
      /* scale: the source region is 1/scale times the requested region */
      oyRectangle_Scale( new_ticket_array_roi_pix, 1.0/scale );
      oyRectangle_Round( new_ticket_array_roi_pix );
      /* convert to new_ticket relative dimensions */
      oyPixelAccess_PixelsToRoi( new_ticket, new_ticket_array_roi_pix,
                                 &new_ticket_array_roi );

      /* adapt the access start and write relative to new tickets image width */
      start_x_dst_pixel = OY_ROUND(start_x_src_pixel / scale);
      start_y_dst_pixel = OY_ROUND(start_y_src_pixel / scale);
      oyPixelAccess_ChangeRectangle( new_ticket,
                                     start_x_dst_pixel / image_width,
                                     start_y_dst_pixel / image_width,
                                     new_ticket_array_roi );

      if(oy_debug)
        oyMessageFunc_p( oy_debug?oyMSG_DBG:oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                  "ticket: %s image[%d](%d) -> [%d](%d) scale %f\n",OY_DBG_ARGS_,
                  oyPixelAccess_Show( ticket ),
                  oyStruct_GetId((oyStruct_s*)image),oyImage_GetWidth(image),
                  oyStruct_GetId((oyStruct_s*)output_image),oyImage_GetWidth(output_image), scale );

      if(oy_debug)
      {
        char * troi;
        troi = strdup( oyRectangle_Show(ticket_roi_pix) );
        oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                  "ticket_roi_pix: %s %s %f new_ticket_array_roi_pix: %s",OY_DBG_ARGS_,
                  troi, "scale factor:", scale,
                  oyRectangle_Show(new_ticket_array_roi_pix) );
        if(troi) free(troi);
        oyra_msg( oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                  "image_pix: %s start_x:%g start_y:%g",OY_DBG_ARGS_,
                  oyRectangle_Show(image_pix),
                  oyPixelAccess_GetStart( new_ticket, 0 )*image_width,
                  oyPixelAccess_GetStart( new_ticket, 1 )*image_width );
      }

      if(oyRectangle_CountPoints( new_ticket_array_roi ) > 0)
      {
        int nw,nh,w,h,x,y,xs,ys;
        oyArray2d_s * array_in,
                    * array_out;
        uint8_t ** array_in_data,
                ** array_out_data;
        /* get pixel layout infos for copying */
        oyDATATYPE_e data_type_in = oyToDataType_m( layout_src ),
                     data_type_out = oyToDataType_m( layout_dst );
        int bps_in = oyDataTypeGetSize( data_type_in ),
            bps_out = oyDataTypeGetSize( data_type_out );
        int issue = 0;

        /* get the source pixels */
        if(oy_debug > 2)
          oyra_msg( oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                    "%s %s",OY_DBG_ARGS_,
                    "Run new_ticket",
                    oyPixelAccess_Show( new_ticket ) );
        result = oyFilterNode_Run( input_node, plug, new_ticket );

        /* prepare the current ticket */
        oyPixelAccess_SetArrayFocus( ticket, 0 );

        /* get the channel buffers */
        array_in = oyPixelAccess_GetArray( new_ticket );
        array_out = oyPixelAccess_GetArray( ticket );
        array_in_data = oyArray2d_GetData( array_in );
        array_out_data = oyArray2d_GetData( array_out );
        /* Array widths are in bytes of channels; divide to get pixel counts. */
        w = oyArray2d_GetWidth ( array_out ) / channels_dst;
        h = oyArray2d_GetHeight( array_out );
        nw = oyArray2d_GetWidth ( array_in ) / channels_src;
        nh = oyArray2d_GetHeight( array_in );

        /* Sanity check: the source array must cover the scaled-up target. */
        if(nw < (int)OY_ROUND(w/scale)) issue |= 1;
        if(nh < (int)OY_ROUND(h/scale)) issue |= 2;
        if(issue || oy_debug > 2)
        {
          oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                    "ticket: %s",OY_DBG_ARGS_, oyPixelAccess_Show(ticket));
          oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                    "new_ti: %s",OY_DBG_ARGS_, oyPixelAccess_Show(new_ticket));
        }
        if(issue || oy_debug)
        {
          char *a,*b,*c;
          a = strdup(oyRectangle_Show( ticket_roi_pix ));
          b = strdup(oyRectangle_Show( image_pix ));
          c = strdup(oyRectangle_Show( new_ticket_array_roi_pix ));
          oyra_msg( issue?oyMSG_ERROR:oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                    "node [%d] scale: %.02f old roi %s/%s(image) -> new roi %s array_in[%d](%dx%d)%dc w/scale=%g h/scale=%g-> array_out[%d](%dx%d)%dc"
                    "%s%s%s",OY_DBG_ARGS_,
                    oyStruct_GetId( (oyStruct_s*)node ), scale,
                    a,b,c,
                    oyStruct_GetId( (oyStruct_s*)array_in ), nw,nh,channels_src, w/scale, h/scale,
                    oyStruct_GetId( (oyStruct_s*)array_out ), w,h, channels_dst,
                    issue?" found issue(s): too":"",
                    issue & 1 ? " wide":"",
                    issue & 2 ? " heigh":"" );
          if(a) {free(a);} if(b) {free(b);} if(c) {free(c);}
        }

        /* do the scaling while copying the channels */
        /* Nearest-neighbor resampling: each target pixel (x,y) copies the
         * source pixel (x/scale, y/scale). */
#if defined(USE_OPENMP)
#pragma omp parallel for private(x,xs,ys)
#endif
        for(y = 0; y < h; ++y)
        {
          ys = y/scale;
          if(OY_ROUNDp(ys) >= nh)
          {
            /* Row would read past the source array; skip and report. */
            if(oy_debug || (OY_ROUNDp(ys) >= (nh + 1)))
              oyra_msg( oy_debug?oyMSG_DBG:oyMSG_ERROR, (oyStruct_s*)ticket,
                        OY_DBG_FORMAT_"scale:%g y:%d h:%d ys:%d/%g nh:%d\n",
                        OY_DBG_ARGS_, scale, y,h,ys,y/scale,nh);
          } else
          for(x = 0; x < w; ++x)
          {
            xs = x/scale;
            if(OY_ROUNDp(xs) < nw)
            {
#if 0
              /* optimisations which have not much benefit */
              int chars = channels_src*bps_in, b;
              uint32_t ** array_out_4 = (uint32_t**)array_out_data;
              uint32_t ** array_in_4 = (uint32_t**)array_in_data;
              if(bps_in == 4)
                for( b = 0; b < channels_src; ++b )
                  array_out_4[y] [x *channels_dst+b] =
                  array_in_4 [ys][xs *channels_src+b];
              else
                for( b = 0; b < chars; ++b )
                  array_out_data[y] [x *channels_dst*bps_out+b] =
                  array_in_data [ys][xs *channels_src*bps_in +b];
#else
              memmove( &array_out_data[y] [x *channels_dst*bps_out],
                       &array_in_data [ys][xs *channels_src*bps_in], channels_src*bps_in );
#endif
            }
          }
        }
        oyPixelAccess_Release( &new_ticket );
        oyArray2d_Release( &array_in );
        oyArray2d_Release( &array_out );
      }
      oyRectangle_Release( &new_ticket_array_roi );
      //oyRectangle_Release( &new_ticket_array_roi_pix );
      oyRectangle_Release( &image_pix );

    } else /* scale == 1.0 */
    {
      /* Identity scale: forward the request unchanged to the input node. */
      result = oyFilterNode_Run( input_node, plug, ticket );
    }

    clean_scale2:
    oyOptions_Release( &node_opts );
    oyFilterPlug_Release( &plug );
    //oyRectangle_Release( &ticket_roi_pix );
    oyFilterNode_Release( &input_node );
  }

  clean_scale1:
  oyImage_Release( &image );
  oyFilterSocket_Release( &socket );
  oyFilterNode_Release( &node );

  return result;
}
#define OY_IMAGE_SCALE_REGISTRATION OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "scale"
/** @brief oyra oyCMMapi7_s implementation
*
* a filter providing a scale image filter
*
* @version Oyranos: 0.9.5
* @since 2013/06/14 (Oyranos: 0.9.5)
* @date 2013/06/14
*/
oyCMMapi_s * oyraApi7ImageScaleCreate(void)
{
oyCMMapi7_s * scale7;
int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
module_api[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};
static oyDATATYPE_e data_types[7] = {oyUINT8, oyUINT16, oyUINT32,
oyHALF, oyFLOAT, oyDOUBLE, 0};
oyConnectorImaging_s * plug = oyConnectorImaging_New(0),
* socket = oyConnectorImaging_New(0);
static oyConnectorImaging_s * plugs[2] = {0,0},
* sockets[2] = {0,0};
plugs[0] = plug;
sockets[0] = socket;
oyConnectorImaging_SetDataTypes( plug, data_types, 6 );
oyConnectorImaging_SetReg( plug, "//" OY_TYPE_STD "/manipulator.data" );
oyConnectorImaging_SetMatch( plug, oyFilterSocket_MatchImagingPlug );
oyConnectorImaging_SetTexts( plug, oyCMMgetImageConnectorPlugText,
oy_image_connector_texts );
oyConnectorImaging_SetIsPlug( plug, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_ID, 1 );
oyConnectorImaging_SetDataTypes( socket, data_types, 6 );
oyConnectorImaging_SetReg( socket, "//" OY_TYPE_STD "/manipulator.data" );
oyConnectorImaging_SetMatch( socket, oyFilterSocket_MatchImagingPlug );
oyConnectorImaging_SetTexts( socket, oyCMMgetImageConnectorSocketText,
oy_image_connector_texts );
oyConnectorImaging_SetIsPlug( socket, 0 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_ID, 1 );
scale7 = oyCMMapi7_Create ( oyraCMMInit, oyraCMMMessageFuncSet,
OY_IMAGE_SCALE_REGISTRATION,
cmm_version, module_api,
NULL,
oyraFilter_ImageScaleRun,
(oyConnector_s**)plugs, 1, 0,
(oyConnector_s**)sockets, 1, 0,
0, 0 );
return (oyCMMapi_s*) scale7;
}
/** Function oyraApi4UiImageScaleGetText
 *  @brief   answer UI text requests for the "scale" image filter
 *
 *  Supported selectors are "name", "help" and "category"; for each, the
 *  requested @p type picks a nick, a short name or a longer description.
 *
 *  @param select   one of "name", "help" or "category"
 *  @param type     oyNAME_NICK, oyNAME_NAME or oyNAME_DESCRIPTION
 *  @param context  unused
 *  @return         a string owned by the module, or 0 for unknown requests
 */
const char * oyraApi4UiImageScaleGetText (
const char * select,
oyNAME_e type,
oyStruct_s * context OY_UNUSED )
{
if(strcmp(select,"name") == 0)
{
if(type == oyNAME_NICK)
return "image_scale";
else if(type == oyNAME_NAME)
return _("Image[scale]");
else if(type == oyNAME_DESCRIPTION)
return _("Scale Image Filter Object");
} else if(strcmp(select,"help") == 0)
{
if(type == oyNAME_NICK)
return "help";
else if(type == oyNAME_NAME)
return _("The filter is used to reduce pixels.");
else if(type == oyNAME_DESCRIPTION)
{
/* The help text, including an ASCII diagram of the geometry, is built once
 * and cached for the lifetime of the process.
 * NOTE(review): the lazy initialisation of help_desc is not thread safe --
 * concurrent first calls could race; confirm the callers are serialised. */
static char * help_desc = NULL;
if(!help_desc)
oyStringAddPrintf( &help_desc, 0,0, "%s\n"
"  %s \n"
"       \n"
"  start_xy     %s \n"
"     |        / \n"
"  +-----|---------------/--------------+ \n"
"  |     |              / | \n"
"  |     |             /  +--- %s \n"
"  |  ---+------------/----------+ | \n"
"  |  |              /           +---------- %s \n"
"  |  |       +------+--------+  |       | \n"
"  |  |       |      |        |  |       | \n"
"  |  |       |      |        |  |       | \n"
"  |  |       +---------------+  |       | \n"
"  |  |                          |       | \n"
"  |  +-----------------------+          | \n"
"  |                                     | \n"
"  +------------------------------------+ \n"
"",
_("The filter will expect a \"scale\" double option and will create, fill and process a according data version with a new job ticket. The new job tickets image, array and output_array_roi will be divided by the supplied \"scale\" factor. It's plug will request the divided image sizes from the source socket."),
_("Relation of positional parameters:"),
/* output image region of interest */
_("output_array_roi"),
_("source image"),
_("output image") );
return help_desc;
}
} else if(strcmp(select,"category") == 0)
{
if(type == oyNAME_NICK)
return "category";
else if(type == oyNAME_NAME)
return _("Image/Simple Image[scale]");
else if(type == oyNAME_DESCRIPTION)
return _("The filter is used to reduce pixels.");
}
/* unknown selector */
return 0;
}
/** @brief oyra oyCMMapi4_s implementation
*
* a filter providing a scale image filter
*
* @version Oyranos: 0.9.5
* @since 2013/06/14 (Oyranos: 0.9.5)
* @date 2013/06/14
*/
oyCMMapi_s * oyraApi4ImageScaleCreate(void)
{
  /* selectors which oyraApi4UiImageScaleGetText() can answer, 0 terminated */
  static const char * ui_texts[] = {"name", "help", "category", 0};
  int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
          module_api[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};
  oyCMMui_s * scale_ui;
  oyCMMapi4_s * scale4;

  /* UI object delivering the name/help/category strings of the filter */
  scale_ui = oyCMMui_Create( "Image/Simple Image[scale]", /* category */
                             oyraApi4UiImageScaleGetText,
                             ui_texts, 0 );
  /* no context type nor context conversion: processing lives in the api7 */
  scale4 = oyCMMapi4_Create( oyraCMMInit, oyraCMMMessageFuncSet,
                             OY_IMAGE_SCALE_REGISTRATION,
                             cmm_version, module_api,
                             NULL,
                             NULL,
                             NULL,
                             scale_ui,
                             NULL );
  return (oyCMMapi_s*)scale4;
}
/* OY_IMAGE_SCALE_REGISTRATION ----------------------------------------------*/
/* ---------------------------------------------------------------------------*/
|
AlloyImage.h | /*
* Copyright(C) 2015, Blake C. Lucas, Ph.D. (img.science@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef ALLOYIMAGE2D_H_INCLUDE_GUARD
#define ALLOYIMAGE2D_H_INCLUDE_GUARD
#include "AlloyCommon.h"
#include "AlloyMath.h"
#include "sha2.h"
#include "AlloyFileUtil.h"
#include "cereal/types/vector.hpp"
#include <vector>
#include <functional>
#include <fstream>
#include <random>
namespace aly {
bool SANITY_CHECK_IMAGE();
bool SANITY_CHECK_IMAGE_IO();
bool SANITY_CHECK_PYRAMID();
// Runtime tag for the scalar type stored in an image's channels; mirrors the
// ImageType template argument I of Image<T, C, I>. Values are contiguous and
// start at 0 so they can index lookup tables.
enum class ImageType {
	BYTE = 0,
	UBYTE = 1,
	SHORT = 2,
	USHORT = 3,
	INT = 4,
	UINT = 5,
	FLOAT = 6,
	DOUBLE = 7
};
template<class L, class R> std::basic_ostream<L, R>& operator <<(
		std::basic_ostream<L, R> & ss, const ImageType& type) {
	// Lower-case names indexed by the numeric enum value (BYTE==0 .. DOUBLE==7).
	static const char* const kNames[] = { "byte", "ubyte", "short", "ushort",
			"int", "uint", "float", "double" };
	const int idx = static_cast<int>(type);
	if (idx >= 0 && idx < 8) {
		ss << kNames[idx];
	}
	// A value outside the enum range leaves the stream untouched.
	return ss;
}
template<class T, int C, ImageType I> struct Image;
template<class T, int C, ImageType I> void WriteImageToRawFile(
const std::string& fileName, const Image<T, C, I>& img);
/// Fixed-channel 2D image; pixels are vec<T, C> stored row-major in `data`
/// (data.size() == width * height). `x`/`y` hold an optional position offset,
/// `id` a user-assigned identifier carried through serialization.
template<class T, int C, ImageType I> struct Image {
protected:
	int x, y;              // position (offset) of the image, e.g. as a tile/ROI
	std::string hashCode;  // cached content hash; refreshed by updateHashCode()
public:
	std::vector<vec<T, C>> data;  // row-major pixel storage
	typedef vec<T, C> ValueType;
	typedef typename std::vector<ValueType>::iterator iterator;
	typedef typename std::vector<ValueType>::const_iterator const_iterator;
	typedef typename std::vector<ValueType>::reverse_iterator reverse_iterator;
	typedef typename std::vector<ValueType>::const_reverse_iterator const_reverse_iterator;
	iterator begin() {
		return data.begin();
	}
	iterator end() {
		return data.end();
	}
	const_iterator cbegin() const {
		return data.cbegin();
	}
	const_iterator cend() const {
		return data.cend();
	}
	reverse_iterator rbegin() {
		return data.rbegin();
	}
	reverse_iterator rend() {
		return data.rend();
	}
	// FIX: the const overloads previously returned reverse_iterator, which
	// cannot be constructed from the const_reverse_iterator a const vector
	// yields, so they failed to compile whenever instantiated.
	const_reverse_iterator rbegin() const {
		return data.rbegin();
	}
	const_reverse_iterator rend() const {
		return data.rend();
	}
	int width;
	int height;
	uint64_t id;
	const int channels;    // always C
	const ImageType type;  // always I
	// Recompute (and cache) the content hash; MAX_SAMPLES == 0 hashes every
	// pixel, otherwise a fixed-seed random subset (defined out of class).
	std::string updateHashCode(size_t MAX_SAMPLES = 0, HashMethod method =
			HashMethod::SHA256);
	std::string getHashCode() {
		return hashCode;
	}
	// Serializes geometry, position, id and hash -- pixel data is written
	// separately (see writeToXML / WriteImageToRawFile).
	template<class Archive> void serialize(Archive & archive) {
		archive(cereal::make_nvp(MakeString() << type << channels, id),
				CEREAL_NVP(width), CEREAL_NVP(height), CEREAL_NVP(x),
				CEREAL_NVP(y), CEREAL_NVP(hashCode));
	}
	// NOTE(review): despite the name this delegates to WriteImageToRawFile,
	// which emits a .raw file plus a MIPAV XML header -- confirm intent.
	void writeToXML(const std::string& fileName) const {
		WriteImageToRawFile(fileName, *this);
	}
	// Fill every pixel with a scalar broadcast across all channels.
	void set(const T& val) {
		data.assign(data.size(), vec<T, C>(val));
	}
	void set(const vec<T, C>& val) {
		data.assign(data.size(), val);
	}
	// Copy from an interleaved channel buffer of at least size()*C elements.
	void set(T* val) {
		if (val == nullptr)
			return;
		size_t offset = 0;
		for (vec<T, C>& x : data) {
			for (int c = 0; c < C; c++) {
				x[c] = val[offset++];
			}
		}
	}
	void set(const std::vector<vec<T, C>>& val) {
		data = val;
	}
	// Copy from a pixel buffer of at least size() elements.
	void set(vec<T, C>* val) {
		if (val == nullptr)
			return;
		size_t offset = 0;
		for (vec<T, C>& x : data) {
			x = val[offset++];
		}
	}
	// Deep copy of another image including its position and id.
	void set(const Image<T, C, I>& other) {
		resize(other.width, other.height);
		id = other.id;
		x = other.x;
		y = other.y;
		set(other.data);
	}
	std::string getTypeName() const {
		return MakeString() << type << channels;
	}
	Image(int w, int h, int x = 0, int y = 0, uint64_t id = 0) :
			x(x), y(y), data(w * h), width(w), height(h), id(id), channels(C), type(
					I) {
	}
	Image(int w, int h, int2 pos, uint64_t id = 0) :
			x(pos.x), y(pos.y), data(w * h), width(w), height(h), id(id), channels(
					C), type(I) {
	}
	Image(T* ptr, int w, int h, int x = 0, int y = 0, uint64_t id = 0) :
			Image(w, h, x, y, id) {
		set(ptr);
	}
	Image(vec<T, C>* ptr, int w, int h, int x = 0, int y = 0, uint64_t id = 0) :
			Image(w, h, x, y, id) {
		set(ptr);
	}
	Image(int w, int h, vec<T, C>* ptr) :
			Image(w, h, 0, 0, 0) {
		set(ptr);
	}
	// NOTE(review): 'ref' is copied into 'data', not aliased -- confirm the
	// non-const reference parameter is intentional.
	Image(std::vector<vec<T, C>>& ref, int w, int h, int x = 0, int y = 0,
			uint64_t id = 0) :
			x(x), y(y), data(ref), width(w), height(h), id(id), channels(C), type(
					I) {
	}
	Image() :
			x(0), y(0), width(0), height(0), id(0), channels(C), type(I) {
	}
	Image(const Image<T, C, I>& img) :
			Image(img.width, img.height, img.x, img.y, img.id) {
		set(img.data);
	}
	Image<T, C, I>& operator=(const Image<T, C, I>& rhs) {
		if (this == &rhs)
			return *this;
		this->resize(rhs.width, rhs.height);
		this->x = rhs.x;
		this->y = rhs.y;
		this->id = rhs.id;
		this->set(rhs.data);
		return *this;
	}
	int2 dimensions() const {
		return int2(width, height);
	}
	int2 position() const {
		return int2(x, y);
	}
	void setPosition(int xx, int yy) {
		x = xx;
		y = yy;
	}
	void setPosition(const int2& pos) {
		x = pos.x;
		y = pos.y;
	}
	size_t size() const {
		return data.size();
	}
	size_t typeSize() const {
		return sizeof(vec<T, C>);
	}
	// Resize storage; existing pixel contents are not preserved in any
	// meaningful layout after a dimension change.
	void resize(int w, int h) {
		data.resize(w * h);
		data.shrink_to_fit();
		width = w;
		height = h;
	}
	inline void clear() {
		data.clear();
		data.shrink_to_fit();
		width = 0;
		height = 0;
	}
	vec<T, C>* vecPtr() {
		if (data.size() == 0)
			return nullptr;
		return data.data();
	}
	const vec<T, C>* vecPtr() const {
		if (data.size() == 0)
			return nullptr;
		return data.data();
	}
	// Raw pointer to the first channel of the first pixel (interleaved layout).
	T* ptr() {
		if (data.size() == 0)
			return nullptr;
		return &(data.front()[0]);
	}
	const T* ptr() const {
		if (data.size() == 0)
			return nullptr;
		return &(data.front()[0]);
	}
	void setZero() {
		data.assign(data.size(), vec<T, C>((T) 0));
	}
	// Unchecked linear access.
	const vec<T, C>& operator[](const size_t i) const {
		return data[i];
	}
	vec<T, C>& operator[](const size_t i) {
		return data[i];
	}
	// Integer accessors clamp coordinates to the valid range (border clamp),
	// so out-of-bounds lookups return the nearest edge pixel.
	vec<T, C>& operator()(int i, int j) {
		return data[clamp(i, 0, width - 1) + clamp(j, 0, height - 1) * width];
	}
	vec<T, C>& operator()(const int2 ij) {
		return data[clamp(ij.x, 0, width - 1)
				+ clamp(ij.y, 0, height - 1) * width];
	}
	const vec<T, C>& operator()(int i, int j) const {
		return data[clamp(i, 0, width - 1) + clamp(j, 0, height - 1) * width];
	}
	const vec<T, C>& operator()(const int2 ij) const {
		return data[clamp(ij.x, 0, width - 1)
				+ clamp(ij.y, 0, height - 1) * width];
	}
	// Bilinear interpolation at a fractional position (border clamped).
	vec<float, C> operator()(float x, float y) {
		int i = static_cast<int>(std::floor(x));
		int j = static_cast<int>(std::floor(y));
		vec<float, C> rgb00 = vec<float, C>(operator()(i, j));
		vec<float, C> rgb10 = vec<float, C>(operator()(i + 1, j));
		vec<float, C> rgb11 = vec<float, C>(operator()(i + 1, j + 1));
		vec<float, C> rgb01 = vec<float, C>(operator()(i, j + 1));
		float dx = x - i;
		float dy = y - j;
		return ((rgb00 * (1.0f - dx) + rgb10 * dx) * (1.0f - dy)
				+ (rgb01 * (1.0f - dx) + rgb11 * dx) * dy);
	}
	vec<float, C> operator()(float x, float y) const {
		int i = static_cast<int>(std::floor(x));
		int j = static_cast<int>(std::floor(y));
		vec<float, C> rgb00 = vec<float, C>(operator()(i, j));
		vec<float, C> rgb10 = vec<float, C>(operator()(i + 1, j));
		vec<float, C> rgb11 = vec<float, C>(operator()(i + 1, j + 1));
		vec<float, C> rgb01 = vec<float, C>(operator()(i, j + 1));
		float dx = x - i;
		float dy = y - j;
		return ((rgb00 * (1.0f - dx) + rgb10 * dx) * (1.0f - dy)
				+ (rgb01 * (1.0f - dx) + rgb11 * dx) * dy);
	}
	// Central-difference gradient of the bilinearly interpolated image.
	matrix<float, C, 2> gradient(float x, float y) const {
		vec<float, C> v21 = vec<float, C>(operator()(x + 1, y));
		vec<float, C> v12 = vec<float, C>(operator()(x, y + 1));
		vec<float, C> v10 = vec<float, C>(operator()(x, y - 1));
		vec<float, C> v01 = vec<float, C>(operator()(x - 1, y));
		vec<float, C> dx = ((v21 - v01) * 0.5f);
		vec<float, C> dy = ((v12 - v10) * 0.5f);
		matrix<float, C, 2> G;
		G.x = dx;
		G.y = dy;
		return G;
	}
	matrix<double, C, 2> gradient(double x, double y) const {
		vec<double, C> v21 = vec<double, C>(operator()(x + 1, y));
		vec<double, C> v12 = vec<double, C>(operator()(x, y + 1));
		vec<double, C> v10 = vec<double, C>(operator()(x, y - 1));
		vec<double, C> v01 = vec<double, C>(operator()(x - 1, y));
		vec<double, C> dx = ((v21 - v01) * 0.5);
		vec<double, C> dy = ((v12 - v10) * 0.5);
		matrix<double, C, 2> G;
		G.x = dx;
		G.y = dy;
		return G;
	}
	// Central-difference gradient on the integer grid (no interpolation).
	matrix<T, C, 2> gradient(int x, int y) const {
		vec<T, C> v21 = operator()(x + 1, y);
		vec<T, C> v12 = operator()(x, y + 1);
		vec<T, C> v10 = operator()(x, y - 1);
		vec<T, C> v01 = operator()(x - 1, y);
		vec<T, C> dx = ((v21 - v01) * T(0.5));
		vec<T, C> dy = ((v12 - v10) * T(0.5));
		matrix<T, C, 2> G;
		G.x = dx;
		G.y = dy;
		return G;
	}
	vec<double, C> operator()(double x, double y) {
		int i = static_cast<int>(std::floor(x));
		int j = static_cast<int>(std::floor(y));
		vec<double, C> rgb00 = vec<double, C>(operator()(i, j));
		vec<double, C> rgb10 = vec<double, C>(operator()(i + 1, j));
		vec<double, C> rgb11 = vec<double, C>(operator()(i + 1, j + 1));
		vec<double, C> rgb01 = vec<double, C>(operator()(i, j + 1));
		double dx = x - i;
		double dy = y - j;
		return ((rgb00 * (1.0 - dx) + rgb10 * dx) * (1.0 - dy)
				+ (rgb01 * (1.0 - dx) + rgb11 * dx) * dy);
	}
	vec<double, C> operator()(double x, double y) const {
		int i = static_cast<int>(std::floor(x));
		int j = static_cast<int>(std::floor(y));
		vec<double, C> rgb00 = vec<double, C>(operator()(i, j));
		vec<double, C> rgb10 = vec<double, C>(operator()(i + 1, j));
		vec<double, C> rgb11 = vec<double, C>(operator()(i + 1, j + 1));
		vec<double, C> rgb01 = vec<double, C>(operator()(i, j + 1));
		double dx = x - i;
		double dy = y - j;
		return ((rgb00 * (1.0 - dx) + rgb10 * dx) * (1.0 - dy)
				+ (rgb01 * (1.0 - dx) + rgb11 * dx) * dy);
	}
	inline vec<float, C> operator()(const vec<float, 2>& pt) {
		return operator()(pt.x, pt.y);
	}
	inline vec<double, C> operator()(const vec<double, 2>& pt) {
		return operator()(pt.x, pt.y);
	}
	inline vec<float, C> operator()(const vec<float, 2>& pt) const {
		return operator()(pt.x, pt.y);
	}
	inline vec<double, C> operator()(const vec<double, 2>& pt) const {
		return operator()(pt.x, pt.y);
	}
	// Apply f(linear_index, pixel) to every pixel in parallel.
	// NOTE(review): int loop index is presumably for OpenMP 2.0 (MSVC)
	// compatibility -- confirm before changing to size_t.
	template<class F> void apply(F f) {
		size_t sz = size();
#pragma omp parallel for
		for (int offset = 0; offset < (int) sz; offset++) {
			f(offset, data[offset]);
		}
	}
	// Half-resolution downsample with a 5x5 binomial kernel (weights sum 256).
	void downSample(Image<T, C, I>& out) const {
		static const double Kernel[5][5] = { { 1, 4, 6, 4, 1 }, { 4, 16, 24, 16,
				4 }, { 6, 24, 36, 24, 6 }, { 4, 16, 24, 16, 4 },
				{ 1, 4, 6, 4, 1 } };
		out.resize(width / 2, height / 2);
#pragma omp parallel for
		for (int i = 0; i < out.width; i++) {
			for (int j = 0; j < out.height; j++) {
				vec<double, C> vsum(0.0);
				for (int ii = 0; ii < 5; ii++) {
					for (int jj = 0; jj < 5; jj++) {
						vsum += Kernel[ii][jj]
								* vec<double, C>(
										operator()(2 * i + ii - 2,
												2 * j + jj - 2));
					}
				}
				out(i, j) = vec<T, C>(vsum / 256.0);
			}
		}
	}
	// Double-resolution upsample; only even-coordinate taps of the 5x5 kernel
	// hit source pixels, hence the normalization by 64 (a quarter of 256).
	void upSample(Image<T, C, I>& out) const {
		static const double Kernel[5][5] = { { 1, 4, 6, 4, 1 }, { 4, 16, 24, 16,
				4 }, { 6, 24, 36, 24, 6 }, { 4, 16, 24, 16, 4 },
				{ 1, 4, 6, 4, 1 } };
		if (out.size() == 0)
			out.resize(width * 2, height * 2);
#pragma omp parallel for
		for (int i = 0; i < out.width; i++) {
			for (int j = 0; j < out.height; j++) {
				vec<double, C> vsum(0.0);
				for (int ii = 0; ii < 5; ii++) {
					for (int jj = 0; jj < 5; jj++) {
						int iii = i + ii - 2;
						int jjj = j + jj - 2;
						if (iii % 2 == 0 && jjj % 2 == 0) {
							vsum += Kernel[ii][jj]
									* vec<double, C>(
											operator()(iii / 2, jjj / 2));
						}
					}
				}
				out(i, j) = vec<T, C>(vsum / 64.0);
			}
		}
	}
	Image<T, C, I> downSample() const {
		Image<T, C, I> out;
		downSample(out);
		return out;
	}
	Image<T, C, I> upSample() const {
		Image<T, C, I> out;
		upSample(out);
		return out;
	}
	// Per-channel minimum over all pixels.
	vec<T, C> min() const {
		vec<T, C> minVal(std::numeric_limits<T>::max());
		for (const vec<T, C>& val : data) {
			minVal = aly::minVec(val, minVal);
		}
		return minVal;
	}
	// Per-channel maximum over all pixels.
	// FIX: previously seeded with numeric_limits<T>::min(), which is the
	// smallest *positive* value for floating point types and made max() wrong
	// for all-negative float/double images; lowest() is the correct identity.
	vec<T, C> max() const {
		vec<T, C> maxVal(std::numeric_limits<T>::lowest());
		for (const vec<T, C>& val : data) {
			maxVal = aly::maxVec(val, maxVal);
		}
		return maxVal;
	}
	// Per-channel (min, max) in a single pass. Same lowest() fix as max().
	std::pair<vec<T, C>, vec<T, C>> range() const {
		vec<T, C> maxVal(std::numeric_limits<T>::lowest());
		vec<T, C> minVal(std::numeric_limits<T>::max());
		for (const vec<T, C>& val : data) {
			maxVal = aly::maxVec(val, maxVal);
			minVal = aly::minVec(val, minVal);
		}
		return std::pair<vec<T, C>, vec<T, C>>(minVal, maxVal);
	}
	// Per-channel arithmetic mean, accumulated in double.
	vec<T, C> mean() const {
		vec<double, C> mean(0.0);
		for (const vec<T, C>& val : data) {
			mean += vec<double, C>(val);
		}
		mean = mean / (double) data.size();
		return vec<T, C>(mean);
	}
	// Per-channel median via a full sort of each channel band.
	vec<T, C> median() const {
		std::vector<T> bands[C];
		for (int c = 0; c < C; c++) {
			bands[c].resize(data.size());
		}
		size_t index = 0;
		for (const vec<T, C>& val : data) {
			for (int c = 0; c < C; c++) {
				bands[c][index] = val[c];
			}
			index++;
		}
#pragma omp parallel for
		for (int c = 0; c < C; c++) {
			std::sort(bands[c].begin(), bands[c].end());
		}
		vec<T, C> med;
		if (data.size() % 2 == 0) {
			for (int c = 0; c < C; c++) {
				// FIX: average with a double literal (was 0.5f in an
				// otherwise double computation).
				med[c] = T(
						((double) bands[c][data.size() / 2]
								+ (double) bands[c][data.size() / 2 - 1])
								* 0.5);
			}
		} else {
			for (int c = 0; c < C; c++) {
				med[c] = bands[c][data.size() / 2];
			}
		}
		return med;
	}
	// Per-channel median absolute deviation (MAD) around the median.
	vec<T, C> mad() const {
		if (data.size() <= 2)
			return vec<T, C>(T(0));
		vec<T, C> med = median();
		std::vector<T> bands[C];
		for (int c = 0; c < C; c++) {
			bands[c].resize(data.size());
		}
		size_t index = 0;
		for (const vec<T, C>& val : data) {
			vec<T, C> e = aly::abs(val - med);
			for (int c = 0; c < C; c++) {
				bands[c][index] = e[c];
			}
			index++;
		}
#pragma omp parallel for
		for (int c = 0; c < C; c++) {
			std::sort(bands[c].begin(), bands[c].end());
		}
		vec<T, C> mad;
		if (data.size() % 2 == 0) {
			for (int c = 0; c < C; c++) {
				// FIX: double literal, see median().
				mad[c] = T(
						((double) bands[c][data.size() / 2]
								+ (double) bands[c][data.size() / 2 - 1])
								* 0.5);
			}
		} else {
			for (int c = 0; c < C; c++) {
				mad[c] = bands[c][data.size() / 2];
			}
		}
		return mad;
	}
	// Robust standard deviation estimate: 1.4826 * MAD (normal consistency).
	vec<T, C> madStdDev() const {
		return vec<T, C>(1.4826 * vec<double, C>(mad()));
	}
	// Per-channel sample standard deviation (Bessel-corrected, n - 1).
	vec<T, C> stdDev() const {
		if (data.size() < 2) {
			return vec<T, C>(T(0));
		}
		vec<T, C> avg = mean();
		vec<double, C> var(0.0);
		for (const vec<T, C>& val : data) {
			vec<double, C> e = vec<double, C>(val - avg);
			var += e * e;
		}
		var = var / (double) (data.size() - 1);
		return vec<T, C>(aly::sqrt(var));
	}
};
// Recompute and cache the content hash. MAX_SAMPLES == 0 hashes the whole
// pixel array; otherwise a fixed-seed pseudo-random subset of pixels is
// hashed, which is cheaper yet still deterministic for a given image.
template<class T, int C, ImageType I> std::string Image<T, C, I>::updateHashCode(
		size_t MAX_SAMPLES, HashMethod method) {
	if (MAX_SAMPLES == 0) {
		hashCode = HashCode(data, method);
		return hashCode;
	}
	const size_t seed = 83128921L;  // fixed seed => reproducible sampling
	std::mt19937 rng(seed);
	std::uniform_int_distribution<int> colSampler(0, width - 1);
	std::uniform_int_distribution<int> rowSampler(0, height - 1);
	std::vector<vec<T, C>> samples(MAX_SAMPLES);
	for (int k = 0; k < (int) MAX_SAMPLES; k++) {
		samples[k] = this->operator()(colSampler(rng), rowSampler(rng));
	}
	hashCode = HashCode(samples, method);
	return hashCode;
}
// Apply func element-wise to corresponding pixels of two mutable images of
// equal dimensions; iteration is parallelized with OpenMP.
template<class T, int C, ImageType I> void Transform(Image<T, C, I>& im1,
		Image<T, C, I>& im2,
		const std::function<void(vec<T, C>&, vec<T, C>&)>& func) {
	if (im1.dimensions() != im2.dimensions())
		throw std::runtime_error(
				MakeString() << "Image dimensions do not match. "
						<< im1.dimensions() << "!=" << im2.dimensions());
	const int count = (int) im1.size();
#pragma omp parallel for
	for (int idx = 0; idx < count; idx++) {
		func(im1.data[idx], im2.data[idx]);
	}
}
// Apply func element-wise to every pixel of a single image, in parallel.
template<class T, int C, ImageType I> void Transform(Image<T, C, I>& im1,
		const std::function<void(vec<T, C>&)>& func) {
	const int count = (int) im1.size();
#pragma omp parallel for
	for (int idx = 0; idx < count; idx++) {
		func(im1.data[idx]);
	}
}
// Apply func element-wise: im1 is written, im2 is read-only; dimensions must
// match.
template<class T, int C, ImageType I> void Transform(Image<T, C, I>& im1,
		const Image<T, C, I>& im2,
		const std::function<void(vec<T, C>&, const vec<T, C>&)>& func) {
	if (im1.dimensions() != im2.dimensions())
		throw std::runtime_error(
				MakeString() << "Image dimensions do not match. "
						<< im1.dimensions() << "!=" << im2.dimensions());
	const int count = (int) im1.size();
#pragma omp parallel for
	for (int idx = 0; idx < count; idx++) {
		func(im1.data[idx], im2.data[idx]);
	}
}
// Apply func element-wise over three images (im1 written; im2, im3 read).
// All three must share the same dimensions.
template<class T, int C, ImageType I> void Transform(Image<T, C, I>& im1,
		const Image<T, C, I>& im2, const Image<T, C, I>& im3,
		const std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)>& func) {
	// FIX: im3 was never validated; a mismatched im3 caused out-of-bounds
	// reads of im3.data below.
	if (im1.dimensions() != im2.dimensions()
			|| im1.dimensions() != im3.dimensions())
		throw std::runtime_error(
				MakeString() << "Image dimensions do not match. "
						<< im1.dimensions() << "!=" << im2.dimensions()
						<< "!=" << im3.dimensions());
	size_t sz = im1.size();
#pragma omp parallel for
	for (int offset = 0; offset < (int) sz; offset++) {
		func(im1.data[offset], im2.data[offset], im3.data[offset]);
	}
}
// Apply func element-wise over four images (im1 written; im2..im4 read).
// All four must share the same dimensions.
template<class T, int C, ImageType I> void Transform(Image<T, C, I>& im1,
		const Image<T, C, I>& im2, const Image<T, C, I>& im3,
		const Image<T, C, I>& im4,
		const std::function<
				void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&,
						const vec<T, C>&)>& func) {
	// FIX: im3 and im4 were never validated; mismatched inputs caused
	// out-of-bounds reads below.
	if (im1.dimensions() != im2.dimensions()
			|| im1.dimensions() != im3.dimensions()
			|| im1.dimensions() != im4.dimensions())
		throw std::runtime_error(
				MakeString() << "Image dimensions do not match. "
						<< im1.dimensions() << "!=" << im2.dimensions()
						<< "!=" << im3.dimensions() << "!="
						<< im4.dimensions());
	size_t sz = im1.size();
#pragma omp parallel for
	for (int offset = 0; offset < (int) sz; offset++) {
		func(im1.data[offset], im2.data[offset], im3.data[offset],
				im4.data[offset]);
	}
}
// Apply func with (column, row) coordinates to corresponding pixels of two
// mutable images; rows are parallelized with OpenMP.
template<class T, int C, ImageType I> void Transform(Image<T, C, I>& im1,
		Image<T, C, I>& im2,
		const std::function<void(int i, int j, vec<T, C>& val1, vec<T, C>& val2)>& func) {
	if (im1.dimensions() != im2.dimensions())
		throw std::runtime_error(
				MakeString() << "Image dimensions do not match. "
						<< im1.dimensions() << "!=" << im2.dimensions());
#pragma omp parallel for
	for (int row = 0; row < im1.height; row++) {
		for (int col = 0; col < im1.width; col++) {
			size_t idx = col + row * im1.width;
			func(col, row, im1.data[idx], im2.data[idx]);
		}
	}
}
// Apply func with the linear pixel offset to corresponding pixels of two
// mutable images of equal dimensions.
template<class T, int C, ImageType I> void Transform(Image<T, C, I>& im1,
		Image<T, C, I>& im2,
		const std::function<
				void(size_t offset, vec<T, C>& val1, vec<T, C>& val2)>& func) {
	if (im1.dimensions() != im2.dimensions())
		throw std::runtime_error(
				MakeString() << "Image dimensions do not match. "
						<< im1.dimensions() << "!=" << im2.dimensions());
	const int count = (int) im1.size();
#pragma omp parallel for
	for (int idx = 0; idx < count; idx++) {
		func(idx, im1.data[idx], im2.data[idx]);
	}
}
// One-line, human-readable summary: type tag, id, position and dimensions.
template<class T, class L, class R, int C, ImageType I> std::basic_ostream<L, R> & operator <<(
		std::basic_ostream<L, R> & ss, const Image<T, C, I> & A) {
	ss << "Image (" << A.getTypeName() << "): " << A.id;
	ss << " Position: " << A.position();
	ss << " Dimensions: [" << A.width << "," << A.height << "]";
	return ss;
}
// Element-wise scalar + image; the result keeps img's position.
template<class T, int C, ImageType I> Image<T, C, I> operator+(
		const vec<T, C>& scalar, const Image<T, C, I>& img) {
	Image<T, C, I> out(img.width, img.height, img.position());
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst = scalar + src;};
	Transform(out, img, op);
	return out;
}
// Element-wise scalar - image; the result keeps img's position.
template<class T, int C, ImageType I> Image<T, C, I> operator-(
		const vec<T, C>& scalar, const Image<T, C, I>& img) {
	Image<T, C, I> out(img.width, img.height, img.position());
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst = scalar - src;};
	Transform(out, img, op);
	return out;
}
// Element-wise scalar * image; the result keeps img's position.
template<class T, int C, ImageType I> Image<T, C, I> operator*(
		const vec<T, C>& scalar, const Image<T, C, I>& img) {
	Image<T, C, I> out(img.width, img.height, img.position());
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst = scalar * src;};
	Transform(out, img, op);
	return out;
}
// Element-wise scalar / image; the result keeps img's position.
template<class T, int C, ImageType I> Image<T, C, I> operator/(
		const vec<T, C>& scalar, const Image<T, C, I>& img) {
	Image<T, C, I> out(img.width, img.height, img.position());
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst = scalar / src;};
	Transform(out, img, op);
	return out;
}
// Element-wise image + scalar; the result keeps img's position.
template<class T, int C, ImageType I> Image<T, C, I> operator+(
		const Image<T, C, I>& img, const vec<T, C>& scalar) {
	Image<T, C, I> out(img.width, img.height, img.position());
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst = src + scalar;};
	Transform(out, img, op);
	return out;
}
// Element-wise image - scalar; the result keeps img's position.
template<class T, int C, ImageType I> Image<T, C, I> operator-(
		const Image<T, C, I>& img, const vec<T, C>& scalar) {
	Image<T, C, I> out(img.width, img.height, img.position());
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst = src - scalar;};
	Transform(out, img, op);
	return out;
}
// Element-wise image * scalar; the result keeps img's position.
template<class T, int C, ImageType I> Image<T, C, I> operator*(
		const Image<T, C, I>& img, const vec<T, C>& scalar) {
	Image<T, C, I> out(img.width, img.height, img.position());
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst = src * scalar;};
	Transform(out, img, op);
	return out;
}
// Element-wise image / scalar; the result keeps img's position.
template<class T, int C, ImageType I> Image<T, C, I> operator/(
		const Image<T, C, I>& img, const vec<T, C>& scalar) {
	Image<T, C, I> out(img.width, img.height, img.position());
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst = src / scalar;};
	Transform(out, img, op);
	return out;
}
// Element-wise negation; the result keeps img's position.
template<class T, int C, ImageType I> Image<T, C, I> operator-(
		const Image<T, C, I>& img) {
	Image<T, C, I> out(img.width, img.height, img.position());
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst = -src;};
	Transform(out, img, op);
	return out;
}
// In-place element-wise addition; returns the updated image by value.
template<class T, int C, ImageType I> Image<T, C, I> operator+=(
		Image<T, C, I>& out, const Image<T, C, I>& img) {
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst += src;};
	Transform(out, img, op);
	return out;
}
// In-place element-wise subtraction; returns the updated image by value.
template<class T, int C, ImageType I> Image<T, C, I> operator-=(
		Image<T, C, I>& out, const Image<T, C, I>& img) {
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst -= src;};
	Transform(out, img, op);
	return out;
}
// In-place element-wise multiplication; returns the updated image by value.
template<class T, int C, ImageType I> Image<T, C, I> operator*=(
		Image<T, C, I>& out, const Image<T, C, I>& img) {
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst *= src;};
	Transform(out, img, op);
	return out;
}
// In-place element-wise division; returns the updated image by value.
template<class T, int C, ImageType I> Image<T, C, I> operator/=(
		Image<T, C, I>& out, const Image<T, C, I>& img) {
	std::function<void(vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& src) {dst /= src;};
	Transform(out, img, op);
	return out;
}
// In-place scalar addition; returns the updated image by value.
template<class T, int C, ImageType I> Image<T, C, I> operator+=(
		Image<T, C, I>& out, const vec<T, C>& scalar) {
	std::function<void(vec<T, C>&)> op = [=](vec<T, C>& dst) {dst += scalar;};
	Transform(out, op);
	return out;
}
// In-place scalar subtraction; returns the updated image by value.
template<class T, int C, ImageType I> Image<T, C, I> operator-=(
		Image<T, C, I>& out, const vec<T, C>& scalar) {
	std::function<void(vec<T, C>&)> op = [=](vec<T, C>& dst) {dst -= scalar;};
	Transform(out, op);
	return out;
}
// In-place scalar multiplication; returns the updated image by value.
template<class T, int C, ImageType I> Image<T, C, I> operator*=(
		Image<T, C, I>& out, const vec<T, C>& scalar) {
	std::function<void(vec<T, C>&)> op = [=](vec<T, C>& dst) {dst *= scalar;};
	Transform(out, op);
	return out;
}
// In-place scalar division; returns the updated image by value.
template<class T, int C, ImageType I> Image<T, C, I> operator/=(
		Image<T, C, I>& out, const vec<T, C>& scalar) {
	std::function<void(vec<T, C>&)> op = [=](vec<T, C>& dst) {dst /= scalar;};
	Transform(out, op);
	return out;
}
// Element-wise image + image; dimensions are validated inside Transform.
template<class T, int C, ImageType I> Image<T, C, I> operator+(
		const Image<T, C, I>& img1, const Image<T, C, I>& img2) {
	Image<T, C, I> out(img1.width, img1.height);
	std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& a, const vec<T, C>& b) {dst = a + b;};
	Transform(out, img1, img2, op);
	return out;
}
// Element-wise image - image; dimensions are validated inside Transform.
template<class T, int C, ImageType I> Image<T, C, I> operator-(
		const Image<T, C, I>& img1, const Image<T, C, I>& img2) {
	Image<T, C, I> out(img1.width, img1.height);
	std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& a, const vec<T, C>& b) {dst = a - b;};
	Transform(out, img1, img2, op);
	return out;
}
// Element-wise image * image; dimensions are validated inside Transform.
template<class T, int C, ImageType I> Image<T, C, I> operator*(
		const Image<T, C, I>& img1, const Image<T, C, I>& img2) {
	Image<T, C, I> out(img1.width, img1.height);
	std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& a, const vec<T, C>& b) {dst = a * b;};
	Transform(out, img1, img2, op);
	return out;
}
// Element-wise image / image; dimensions are validated inside Transform.
template<class T, int C, ImageType I> Image<T, C, I> operator/(
		const Image<T, C, I>& img1, const Image<T, C, I>& img2) {
	Image<T, C, I> out(img1.width, img1.height);
	std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> op =
			[=](vec<T, C>& dst, const vec<T, C>& a, const vec<T, C>& b) {dst = a / b;};
	Transform(out, img1, img2, op);
	return out;
}
// Writes img to "<base>.raw" as planar (channel-major) binary data and emits
// a matching MIPAV-style XML header "<base>.xml" describing dimensions and
// element type.
// Throws std::runtime_error if either file cannot be opened or fully written.
// Fix: fwrite/fclose results are now checked so a full disk or I/O error is
// reported instead of silently producing a truncated .raw file, and an
// unrecognized image type no longer emits an empty <Data-type> element.
template<class T, int C, ImageType I> void WriteImageToRawFile(
		const std::string& file, const Image<T, C, I>& img) {
	std::ostringstream vstr;
	std::string fileName = GetFileWithoutExtension(file);
	vstr << fileName << ".raw";
	FILE* f = fopen(vstr.str().c_str(), "wb");
	if (f == NULL) {
		throw std::runtime_error(
				MakeString() << "Could not open " << vstr.str().c_str()
						<< " for writing.");
	}
	// Planar layout: all of channel 0, then channel 1, ...; each channel in
	// row-major (j outer, i inner) order.
	for (int c = 0; c < img.channels; c++) {
		for (int j = 0; j < img.height; j++) {
			for (int i = 0; i < img.width; i++) {
				T val = img(i, j)[c];
				if (fwrite(&val, sizeof(T), 1, f) != 1) {
					fclose(f);
					throw std::runtime_error(
							MakeString() << "Could not write "
									<< vstr.str().c_str());
				}
			}
		}
	}
	// fclose flushes buffered output; check it so deferred write errors
	// (e.g. disk full) are not lost.
	if (fclose(f) != 0) {
		throw std::runtime_error(
				MakeString() << "Could not finalize " << vstr.str().c_str());
	}
	std::string typeName = "";
	switch (img.type) {
	case ImageType::BYTE:
		typeName = "Byte";
		break;
	case ImageType::UBYTE:
		typeName = "Unsigned Byte";
		break;
	case ImageType::SHORT:
		typeName = "Short";
		break;
	case ImageType::USHORT:
		typeName = "Unsigned Short";
		break;
	case ImageType::INT:
		typeName = "Integer";
		break;
	case ImageType::UINT:
		typeName = "Unsigned Integer";
		break;
	case ImageType::FLOAT:
		typeName = "Float";
		break;
	case ImageType::DOUBLE:
		typeName = "Double";
		break;
	default:
		// Make an unexpected enumerator visible in the header rather than
		// silently writing an empty <Data-type>.
		typeName = "Unknown";
		break;
	}
	std::stringstream sstr;
	sstr << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
	sstr << "<!-- MIPAV header file -->\n";
	sstr
			<< "<image xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" nDimensions=\"3\">\n";
	sstr << "	<Dataset-attributes>\n";
	sstr << "		<Image-offset>0</Image-offset>\n";
	sstr << "		<Data-type>" << typeName << "</Data-type>\n";
	sstr << "		<Endianess>Little</Endianess>\n";
	sstr << "		<Extents>" << img.width << "</Extents>\n";
	sstr << "		<Extents>" << img.height << "</Extents>\n";
	sstr << "		<Extents>" << img.channels << "</Extents>\n";
	sstr << "		<Resolutions>\n";
	sstr << "			<Resolution>1.0</Resolution>\n";
	sstr << "			<Resolution>1.0</Resolution>\n";
	sstr << "			<Resolution>1.0</Resolution>\n";
	sstr << "		</Resolutions>\n";
	sstr << "		<Slice-spacing>1.0</Slice-spacing>\n";
	sstr << "		<Slice-thickness>0.0</Slice-thickness>\n";
	sstr << "		<Units>Millimeters</Units>\n";
	sstr << "		<Units>Millimeters</Units>\n";
	sstr << "		<Units>Millimeters</Units>\n";
	sstr << "		<Compression>none</Compression>\n";
	sstr << "		<Orientation>Unknown</Orientation>\n";
	sstr << "		<Subject-axis-orientation>Unknown</Subject-axis-orientation>\n";
	sstr << "		<Subject-axis-orientation>Unknown</Subject-axis-orientation>\n";
	sstr << "		<Subject-axis-orientation>Unknown</Subject-axis-orientation>\n";
	sstr << "		<Origin>0.0</Origin>\n";
	sstr << "		<Origin>0.0</Origin>\n";
	sstr << "		<Origin>0.0</Origin>\n";
	sstr << "		<Modality>Unknown Modality</Modality>\n";
	sstr << "	</Dataset-attributes>\n";
	sstr << "</image>\n";
	std::ofstream myfile;
	std::stringstream xmlFile;
	xmlFile << fileName << ".xml";
	myfile.open(xmlFile.str().c_str(), std::ios_base::out);
	if (!myfile.is_open()) {
		throw std::runtime_error(
				MakeString() << "Could not open " << xmlFile.str()
						<< " for writing.");
	}
	myfile << sstr.str();
	myfile.close();
}
// Convenience aliases for the common channel-count / storage-type
// combinations. Two naming schemes coexist: semantic names (RGBA/RGB/A with
// an i/f suffix for int/float) and systematic names Image<channels><suffix>.
// Some aliases overlap, e.g. ImageRGBA and Image4ub are the same type.
typedef Image<uint8_t, 4, ImageType::UBYTE> ImageRGBA;
typedef Image<int, 4, ImageType::INT> ImageRGBAi;
typedef Image<float, 4, ImageType::FLOAT> ImageRGBAf;
typedef Image<uint8_t, 3, ImageType::UBYTE> ImageRGB;
typedef Image<int, 3, ImageType::INT> ImageRGBi;
typedef Image<float, 3, ImageType::FLOAT> ImageRGBf;
// Single-channel ("alpha"/gray) images.
typedef Image<uint8_t, 1, ImageType::UBYTE> ImageA;
typedef Image<int, 1, ImageType::INT> ImageAi;
typedef Image<float, 1, ImageType::FLOAT> ImageAf;
// Systematic 4-channel aliases.
typedef Image<int8_t, 4, ImageType::BYTE> Image4b;
typedef Image<uint8_t, 4, ImageType::UBYTE> Image4ub;
typedef Image<uint16_t, 4, ImageType::USHORT> Image4us;
typedef Image<int16_t, 4, ImageType::SHORT> Image4s;
typedef Image<int, 4, ImageType::INT> Image4i;
typedef Image<uint32_t, 4, ImageType::UINT> Image4ui;
typedef Image<float, 4, ImageType::FLOAT> Image4f;
// Systematic 3-channel aliases.
typedef Image<int8_t, 3, ImageType::BYTE> Image3b;
typedef Image<uint8_t, 3, ImageType::UBYTE> Image3ub;
typedef Image<uint16_t, 3, ImageType::USHORT> Image3us;
typedef Image<int16_t, 3, ImageType::SHORT> Image3s;
typedef Image<int, 3, ImageType::INT> Image3i;
typedef Image<uint32_t, 3, ImageType::UINT> Image3ui;
typedef Image<float, 3, ImageType::FLOAT> Image3f;
// Systematic 2-channel aliases (e.g. luma + alpha).
typedef Image<int8_t, 2, ImageType::BYTE> Image2b;
typedef Image<uint8_t, 2, ImageType::UBYTE> Image2ub;
typedef Image<uint16_t, 2, ImageType::USHORT> Image2us;
typedef Image<int16_t, 2, ImageType::SHORT> Image2s;
typedef Image<int, 2, ImageType::INT> Image2i;
typedef Image<uint32_t, 2, ImageType::UINT> Image2ui;
typedef Image<float, 2, ImageType::FLOAT> Image2f;
// Systematic 1-channel aliases.
typedef Image<int8_t, 1, ImageType::BYTE> Image1b;
typedef Image<uint8_t, 1, ImageType::UBYTE> Image1ub;
typedef Image<uint16_t, 1, ImageType::USHORT> Image1us;
typedef Image<int16_t, 1, ImageType::SHORT> Image1s;
typedef Image<int, 1, ImageType::INT> Image1i;
typedef Image<uint32_t, 1, ImageType::UINT> Image1ui;
typedef Image<float, 1, ImageType::FLOAT> Image1f;
// Disk I/O for the concrete image aliases; implemented out of line in the
// corresponding translation unit (not visible in this header).
void WriteImageToFile(const std::string& file, const ImageRGBA& img);
void WriteImageToFile(const std::string& file, const ImageRGB& img);
void WriteImageToFile(const std::string& file, const ImageRGBAf& img);
void WriteImageToFile(const std::string& file, const ImageRGBf& img);
void WriteImageToFile(const std::string& file, const Image1f& img);
void WriteImageToFile(const std::string& file, const Image1ub& img);
void ReadImageFromFile(const std::string& file, Image1ub& img);
void ReadImageFromFile(const std::string& file, Image1f& img);
void ReadImageFromFile(const std::string& file, ImageRGBA& img);
void ReadImageFromFile(const std::string& file, ImageRGB& img);
void ReadImageFromFile(const std::string& file, ImageRGBAf& img);
void ReadImageFromFile(const std::string& file, ImageRGBf& img);
// Conversions between byte and float RGB(A) images and between 3- and
// 4-channel layouts; implemented out of line.
// NOTE(review): the value-scaling convention (e.g. 255 <-> 1.0) is defined
// in the implementation file — confirm there before relying on it.
void ConvertImage(const ImageRGBAf& in, ImageRGBA& out);
void ConvertImage(const ImageRGBf& in, ImageRGB& out);
void ConvertImage(const ImageRGBA& in, ImageRGBAf& out);
void ConvertImage(const ImageRGB& in, ImageRGBf& out);
void ConvertImage(const ImageRGBA& in, ImageRGB& out);
void ConvertImage(const ImageRGBAf& in, ImageRGBf& out);
void ConvertImage(const ImageRGB& in, ImageRGBA& out);
void ConvertImage(const ImageRGBf& in, ImageRGBAf& out);
// Collapses a 4-channel image to single-channel luma; alpha is dropped.
// sRGB selects the (0.21, 0.72, 0.07) weighting, otherwise (0.30, 0.59, 0.11)
// is used. Output inherits size, id, and position from the input.
template<class T, ImageType I> void ConvertImage(const Image<T, 4, I>& in,
		Image<T, 1, I>& out, bool sRGB = true) {
	out.resize(in.width, in.height);
	out.id = in.id;
	out.setPosition(in.position());
	// Hoist the channel weights so one loop serves both weightings.
	const double wr = sRGB ? 0.21 : 0.30;
	const double wg = sRGB ? 0.72 : 0.59;
	const double wb = sRGB ? 0.07 : 0.11;
	int N = (int) out.size();
#pragma omp parallel for
	for (int i = 0; i < N; i++) {
		vec<T, 4> c = in[i];
		out[i] = vec<T, 1>(T(wr * c.x + wg * c.y + wb * c.z));
	}
}
// Collapses a 4-channel image to (luma, alpha) pairs.
// sRGB selects the (0.21, 0.72, 0.07) weighting, otherwise (0.30, 0.59, 0.11)
// is used; alpha (c.w) is copied through unchanged. Output inherits size,
// id, and position from the input.
// Fix: cast out.size() to int as the sibling overloads do, avoiding an
// implicit size_t -> int narrowing.
template<class T, ImageType I> void ConvertImage(const Image<T, 4, I>& in,
		Image<T, 2, I>& out, bool sRGB = true) {
	out.resize(in.width, in.height);
	out.id = in.id;
	out.setPosition(in.position());
	int N = (int) out.size();
	if (sRGB) {
#pragma omp parallel for
		for (int i = 0; i < N; i++) {
			vec<T, 4> c = in[i];
			out[i] = vec<T, 2>(T(0.21 * c.x + 0.72 * c.y + 0.07 * c.z), c.w);
		}
	} else {
#pragma omp parallel for
		for (int i = 0; i < N; i++) {
			vec<T, 4> c = in[i];
			out[i] = vec<T, 2>(T(0.30 * c.x + 0.59 * c.y + 0.11 * c.z), c.w);
		}
	}
}
// Collapses a 3-channel image to single-channel luma.
// sRGB selects the (0.21, 0.72, 0.07) weighting, otherwise (0.30, 0.59, 0.11)
// is used. Output inherits size, id, and position from the input.
template<class T, ImageType I> void ConvertImage(const Image<T, 3, I>& in,
		Image<T, 1, I>& out, bool sRGB = true) {
	out.resize(in.width, in.height);
	out.id = in.id;
	out.setPosition(in.position());
	// Hoist the channel weights so one loop serves both weightings.
	const double wr = sRGB ? 0.21 : 0.30;
	const double wg = sRGB ? 0.72 : 0.59;
	const double wb = sRGB ? 0.07 : 0.11;
	int N = (int) out.size();
#pragma omp parallel for
	for (int i = 0; i < N; i++) {
		vec<T, 3> c = in[i];
		out[i] = vec<T, 1>(T(wr * c.x + wg * c.y + wb * c.z));
	}
}
// Fills img with a checkerboard of roughly horizTiles x vertTiles cells,
// alternating transparent black and opaque white squares.
// Fix: cell sizes are clamped to at least 1 so a tile count of zero, or one
// larger than the image dimension, can no longer divide by zero below.
inline void MakeCheckerBoard(ImageRGBA& img, int horizTiles, int vertTiles) {
	const int width = img.width;
	const int height = img.height;
	const int cellWidth = std::max(1, width / std::max(1, horizTiles));
	const int cellHeight = std::max(1, height / std::max(1, vertTiles));
	for (int i = 0; i < width; i++) {
		for (int j = 0; j < height; j++) {
			bool vt = (i / cellWidth) % 2 == 0;
			bool ht = (j / cellHeight) % 2 == 0;
			// vt XOR ht selects the dark squares.
			img(i, j) = (vt != ht) ?
					aly::RGBA(0, 0, 0, 0) : aly::RGBA(255, 255, 255, 255);
		}
	}
}
// Fills img with a checkerboard of roughly horizTiles x vertTiles cells,
// alternating black and white squares.
// Fix: cell sizes are clamped to at least 1 so a tile count of zero, or one
// larger than the image dimension, can no longer divide by zero below.
inline void MakeCheckerBoard(ImageRGB& img, int horizTiles, int vertTiles) {
	const int width = img.width;
	const int height = img.height;
	const int cellWidth = std::max(1, width / std::max(1, horizTiles));
	const int cellHeight = std::max(1, height / std::max(1, vertTiles));
	for (int i = 0; i < width; i++) {
		for (int j = 0; j < height; j++) {
			bool vt = (i / cellWidth) % 2 == 0;
			bool ht = (j / cellHeight) % 2 == 0;
			// vt XOR ht selects the dark squares.
			img(i, j) = (vt != ht) ?
					aly::ubyte3(0, 0, 0) : aly::ubyte3(255, 255, 255);
		}
	}
}
// Fills img with a checkerboard of roughly horizTiles x vertTiles cells,
// alternating transparent black and opaque white squares (float range 0..1).
// Fix: cell sizes are clamped to at least 1 so a tile count of zero, or one
// larger than the image dimension, can no longer divide by zero below.
inline void MakeCheckerBoard(ImageRGBAf& img, int horizTiles, int vertTiles) {
	const int width = img.width;
	const int height = img.height;
	const int cellWidth = std::max(1, width / std::max(1, horizTiles));
	const int cellHeight = std::max(1, height / std::max(1, vertTiles));
	for (int i = 0; i < width; i++) {
		for (int j = 0; j < height; j++) {
			bool vt = (i / cellWidth) % 2 == 0;
			bool ht = (j / cellHeight) % 2 == 0;
			// vt XOR ht selects the dark squares.
			img(i, j) = (vt != ht) ?
					RGBAf(0, 0, 0, 0) : RGBAf(1.0f, 1.0f, 1.0f, 1.0f);
		}
	}
}
// Fills img with a checkerboard of roughly horizTiles x vertTiles cells,
// alternating black and white squares (float range 0..1).
// Fix: cell sizes are clamped to at least 1 so a tile count of zero, or one
// larger than the image dimension, can no longer divide by zero below.
inline void MakeCheckerBoard(ImageRGBf& img, int horizTiles, int vertTiles) {
	const int width = img.width;
	const int height = img.height;
	const int cellWidth = std::max(1, width / std::max(1, horizTiles));
	const int cellHeight = std::max(1, height / std::max(1, vertTiles));
	for (int i = 0; i < width; i++) {
		for (int j = 0; j < height; j++) {
			bool vt = (i / cellWidth) % 2 == 0;
			bool ht = (j / cellHeight) % 2 == 0;
			// vt XOR ht selects the dark squares.
			img(i, j) = (vt != ht) ?
					RGBf(0, 0, 0) : RGBf(1.0f, 1.0f, 1.0f);
		}
	}
}
// Copies the dims.x x dims.y sub-image of `in` starting at `pos` into `out`,
// and records `pos` as the output's position within the source image.
// NOTE(review): there is no bounds check — pos + dims is assumed to lie
// inside `in`; behavior for out-of-range coordinates depends on
// Image::operator() (not visible here) — confirm before relying on it.
template<class T, int C, ImageType I> void Crop(const Image<T, C, I>& in,
		Image<T, C, I>& out, int2 pos, int2 dims) {
	out.setPosition(pos);
	out.resize(dims.x, dims.y);
	for (int i = 0; i < dims.x; i++) {
		for (int j = 0; j < dims.y; j++) {
			out(i, j) = in(pos.x + i, pos.y + j);
		}
	}
}
// Mirrors the image in place about its horizontal midline; columns are
// processed independently in parallel.
template<class T, int C, ImageType I> void FlipVertical(Image<T, C, I>& in) {
#pragma omp parallel for
	for (int x = 0; x < in.width; x++) {
		int top = 0;
		int bottom = in.height - 1;
		while (top < bottom) {
			std::swap(in(x, top), in(x, bottom));
			top++;
			bottom--;
		}
	}
}
// Mirrors the image in place about its vertical midline; rows are processed
// independently in parallel.
template<class T, int C, ImageType I> void FlipHorizontal(Image<T, C, I>& in) {
#pragma omp parallel for
	for (int y = 0; y < in.height; y++) {
		int left = 0;
		int right = in.width - 1;
		while (left < right) {
			std::swap(in(left, y), in(right, y));
			left++;
			right--;
		}
	}
}
// Halves the image resolution using a 5x5 binomial (Gaussian-like) filter;
// the kernel entries sum to 256, hence the final normalization.
// NOTE(review): near the borders the taps 2*i+ii-2 / 2*j+jj-2 step outside
// the image; this relies on Image::operator() handling out-of-range
// coordinates (not visible here) — confirm.
template<class T, int C, ImageType I> void DownSample(const Image<T, C, I>& in,
		Image<T, C, I>& out) {
	static const double Kernel[5][5] = { { 1, 4, 6, 4, 1 },
			{ 4, 16, 24, 16, 4 }, { 6, 24, 36, 24, 6 }, { 4, 16, 24, 16, 4 }, {
					1, 4, 6, 4, 1 } };
	out.resize(in.width / 2, in.height / 2);
#pragma omp parallel for
	for (int i = 0; i < out.width; i++) {
		for (int j = 0; j < out.height; j++) {
			// Accumulate in double precision regardless of T.
			vec<double, C> vsum(0.0);
			for (int ii = 0; ii < 5; ii++) {
				for (int jj = 0; jj < 5; jj++) {
					vsum += Kernel[ii][jj]
							* vec<double, C>(
									in(2 * i + ii - 2, 2 * j + jj - 2));
				}
			}
			out(i, j) = vec<T, C>(vsum / 256.0);
		}
	}
}
// Doubles the image resolution with the same 5x5 binomial kernel, taking
// only the taps whose source coordinate is even in both axes (i.e. maps to
// an actual input pixel). For every output phase those taps sum to 64,
// hence the normalization by 64 rather than 256.
// If `out` was pre-sized by the caller it is upsampled to that size instead.
// NOTE(review): for iii/jjj == -2 this reads in(-1, ...), relying on
// Image::operator() to handle out-of-range coordinates (not visible here)
// — confirm. Also note C++ `%` keeps the sign, so negative odd offsets are
// correctly excluded.
template<class T, int C, ImageType I> void UpSample(const Image<T, C, I>& in,
		Image<T, C, I>& out) {
	static const double Kernel[5][5] = { { 1, 4, 6, 4, 1 },
			{ 4, 16, 24, 16, 4 }, { 6, 24, 36, 24, 6 }, { 4, 16, 24, 16, 4 }, {
					1, 4, 6, 4, 1 } };
	if (out.size() == 0)
		out.resize(in.width * 2, in.height * 2);
#pragma omp parallel for
	for (int i = 0; i < out.width; i++) {
		for (int j = 0; j < out.height; j++) {
			vec<double, C> vsum(0.0);
			for (int ii = 0; ii < 5; ii++) {
				for (int jj = 0; jj < 5; jj++) {
					int iii = i + ii - 2;
					int jjj = j + jj - 2;
					if (iii % 2 == 0 && jjj % 2 == 0) {
						vsum += Kernel[ii][jj]
								* vec<double, C>(in(iii / 2, jjj / 2));
					}
				}
			}
			out(i, j) = vec<T, C>(vsum / 64.0);
		}
	}
}
// Blits `in` into `out` at offset `pos`, clipping against out's bounds;
// pixels that fall outside `out` are silently skipped.
template<class T, int C, ImageType I> void Set(const Image<T, C, I>& in,
		Image<T, C, I>& out, int2 pos) {
	for (int i = 0; i < in.width; i++) {
		for (int j = 0; j < in.height; j++) {
			const int x = pos.x + i;
			const int y = pos.y + j;
			if (x >= 0 && x < out.width && y >= 0 && y < out.height) {
				out(x, y) = in(i, j);
			}
		}
	}
}
// Lays the input images out on a rows x cols grid inside `out`.
// Pass 1 measures: each grid row is as tall as its tallest image; lines[r]
// records the y offset at which row r starts, and maxX/maxY accumulate the
// overall canvas size. Pass 2 places each image at its slot via Set(),
// packing images left-to-right with no horizontal gaps. Unused canvas area
// is zeroed. Images beyond rows*cols are ignored.
template<class T, int C, ImageType I> void Tile(
		const std::vector<Image<T, C, I>>& in, Image<T, C, I>& out, int rows,
		int cols) {
	int index = 0;
	int maxX = 0;
	int maxY = 0;
	std::vector<int> lines(rows, 0);
	// Pass 1: compute row heights / canvas extents.
	for (int r = 0; r < rows; r++) {
		int runX = 0;
		int runY = 0;
		for (int c = 0; c < cols; c++) {
			if (index >= (int)in.size())
				break;
			const Image<T, C, I>& img = in[index++];
			runX += img.width;
			runY = std::max(runY, img.height);
		}
		maxX = std::max(runX, maxX);
		// Row r starts at the running height *before* adding this row.
		lines[r] = maxY;
		maxY += runY;
		if (index >= (int)in.size())
			break;
	}
	out.resize(maxX, maxY);
	out.set(vec<T, C>(T(0)));
	index = 0;
	// Pass 2: blit each image at its computed slot.
	for (int r = 0; r < rows; r++) {
		int runX = 0;
		int runY = lines[r];
		for (int c = 0; c < cols; c++) {
			if (index >= (int)in.size())
				break;
			const Image<T, C, I>& img = in[index++];
			Set(img, out, int2(runX, runY));
			runX += img.width;
		}
		if (index >= (int)in.size())
			break;
	}
}
// initializer_list overload of Tile(); identical two-pass layout to the
// std::vector overload, but walks the list with an iterator since
// initializer_list has no random access.
template<class T, int C, ImageType I> void Tile(
		const std::initializer_list<Image<T, C, I>>& in, Image<T, C, I>& out,
		int rows, int cols) {
	int maxX = 0;
	int maxY = 0;
	std::vector<int> lines(rows, 0);
	// Pass 1: compute row heights / canvas extents.
	{
		auto iter = in.begin();
		for (int r = 0; r < rows; r++) {
			int runX = 0;
			int runY = 0;
			for (int c = 0; c < cols; c++) {
				if (iter == in.end())
					break;
				const Image<T, C, I>& img = *iter;
				iter++;
				runX += img.width;
				runY = std::max(runY, img.height);
			}
			maxX = std::max(runX, maxX);
			// Row r starts at the running height *before* adding this row.
			lines[r] = maxY;
			maxY += runY;
			if (iter == in.end())
				break;
		}
	}
	out.resize(maxX, maxY);
	out.set(vec<T, C>(T(0)));
	// Pass 2: blit each image at its computed slot.
	{
		auto iter = in.begin();
		for (int r = 0; r < rows; r++) {
			int runX = 0;
			int runY = lines[r];
			for (int c = 0; c < cols; c++) {
				if (iter == in.end())
					break;
				const Image<T, C, I>& img = *iter;
				iter++;
				Set(img, out, int2(runX, runY));
				runX += img.width;
			}
			if (iter == in.end())
				break;
		}
	}
}
// Grayscale (luma) conversions; `sRGB` selects the weighting, matching the
// templated ConvertImage overloads earlier in this header. Implemented out
// of line.
void ConvertImage(const ImageRGBAf& in, Image1ub& out, bool sRGB = true);
void ConvertImage(const ImageRGBf& in, Image1ub& out, bool sRGB = true);
void ConvertImage(const ImageRGB& in, Image1f& out, bool sRGB = true);
void ConvertImage(const ImageRGBA& in, Image1f& out, bool sRGB = true);
// Widening conversions from 1/2-channel images to RGB(A); implemented out
// of line (exact channel-replication semantics defined there).
void ConvertImage(const Image1f& in, ImageRGBAf& out);
void ConvertImage(const Image2f& in, ImageRGBAf& out);
void ConvertImage(const Image1f& in, ImageRGBf& out);
void ConvertImage(const Image1b& in, ImageRGBAf& out);
void ConvertImage(const Image1b& in, ImageRGBf& out);
void ConvertImage(const Image1b& in, ImageRGBA& out);
void ConvertImage(const Image1b& in, ImageRGB& out);
void ConvertImage(const Image1f& in, ImageRGBA& out);
void ConvertImage(const Image1f& in, ImageRGB& out);
}
;
#endif
|
rose_v1_foo.c | #if 1
#include <omp.h>
/* Writes half of each entry of c into the corresponding entry of o1. */
void foo1(double o1[], double c[], int len)
{
  int i;
#pragma omp parallel for private (i) firstprivate (len)
  for (i = 0; i < len; i++) {
    o1[i] = 0.5 * c[i];
  }
}
#endif
#if 1
/* For every element i, scatter-accumulates two half-volumes
 * (0.5*c[i] into o1, 0.5*a[i]*b[i] into o2) at the six output slots
 * named by the element's index row idx[i][0..5]. */
void goo(double *o1, double *o2, double *a, double *b, double *c, int **idx, int len)
{
  int i;
  for (i = 0; i < len; i++) {
    const int *row = idx[i];
    const double vnew = 0.5 * c[i];
    const double vold = 0.5 * a[i] * b[i];
    int k;
    for (k = 0; k < 6; k++) {
      o1[row[k]] += vnew;
      o2[row[k]] += vold;
    }
  }
}
#endif
|
Cover.h | /*
* Cover.h
*
* Created on: 03.10.2013
* Author: cls
*/
#ifndef COVER_H_
#define COVER_H_
#include <cinttypes>
#include <set>
#include <vector>
#include <map>
#include <cassert>
#include <limits>
#include "Partition.h"
#include "../Globals.h"
namespace NetworKit {
/**
* @ingroup structures
* Implements a cover of a set, i.e. an assignment of
* its elements to possibly overlapping subsets.
*/
class Cover {

public:

	/** Default constructor */
	Cover();

	/**
	 * Create a new cover data structure for elements up to a maximum element index.
	 *
	 * @param[in] z maximum index
	 */
	Cover(index z);

	/**
	 * Creates a new cover data structure which contains the given partition.
	 *
	 * @param[in] p The partition to construct the cover from
	 */
	Cover(const Partition &p);

	/** Default destructor */
	virtual ~Cover() = default;

	/**
	 * Index operator. Returns a mutable reference to the set of subset ids
	 * the element belongs to.
	 * NOTE(review): no bounds check on @a e — callers must guarantee e <= z.
	 *
	 * @param[in] e an element
	 */
	inline std::set<index>& operator [](const index& e) {
		return this->data[e];
	}
	/**
	 * Index operator for const instances of this class.
	 *
	 * @param[in] e an element
	 */
	inline const std::set<index>& operator [](const index& e) const {
		return this->data[e];
	}

	/**
	 * Return the ids of subsets in which the element @a e is contained.
	 * Unlike operator[], this returns a copy of the set.
	 *
	 * @param[in] e an element
	 * @return A set of subset ids in which @a e is contained.
	 */
	inline std::set<index> subsetsOf(index e) const {
		// TODO: assert (e < this->numberOfElements());
		return this->data[e];
	}

	/**
	 * Check if cover assigns a valid subset to the element @a e.
	 *
	 * @param e an element.
	 * @return @c true, if @a e is assigned to a valid subset, @c false otherwise.
	 */
	bool contains(index e) const;

	/**
	 * Check if two elements @a e1 and @a e2 belong to the same subset.
	 *
	 * @param e1 an element.
	 * @param e2 an element.
	 * @return @c true, if @a e1 and @a e2 belong to the same subset, @c false otherwise.
	 */
	bool inSameSubset(index e1, index e2) const;

	/**
	 * Get the members of a specific subset @a s.
	 *
	 * @return The set of members of subset @a s.
	 */
	std::set<index> getMembers(const index s) const;

	/**
	 * Add the (previously unassigned) element @a e to the set @a s.
	 * @param[in] s a subset
	 * @param[in] e an element
	 */
	void addToSubset(index s, index e);

	/**
	 * Remove the element @a e from the set @a s.
	 * @param[in] s a subset
	 * @param[in] e an element
	 */
	void removeFromSubset(index s, index e);

	/**
	 * Move the element @a e to subset @a s, i.e. remove it from all
	 * other subsets and place it in the subset.
	 * @param[in] s a subset
	 * @param[in] e an element
	 */
	void moveToSubset(index s, index e);

	/**
	 * Creates a singleton set containing the element @a e and returns the index of the new set.
	 * @param[in] e an element
	 * @return The index of the new set.
	 */
	index toSingleton(index e);

	/**
	 * Assigns every element to a singleton set.
	 * Set id is equal to element id.
	 */
	void allToSingletons();

	/**
	 * Assigns the elements from both sets to a new set.
	 * @param[in] s a subset
	 * @param[in] t a subset
	 */
	void mergeSubsets(index s, index t);

	/**
	 * Get an upper bound for the subset ids that have been assigned.
	 * (This is the maximum id + 1.)
	 *
	 * @return An upper bound.
	 */
	index upperBound() const;

	/**
	 * Get a lower bound for the subset ids that have been assigned.
	 * @return A lower bound.
	 */
	index lowerBound() const;

	/**
	 * Get a list of subset sizes. Indices do not necessarily correspond to subset ids.
	 *
	 * @return A list of subset sizes.
	 */
	std::vector<count> subsetSizes() const;

	/**
	 * Get a map from subset id to size of the subset.
	 *
	 * @return A map from subset id to size of the subset.
	 */
	std::map<index, count> subsetSizeMap() const;

	/**
	 * Get the current number of sets in this cover.
	 *
	 * @return The number of sets in this cover.
	 */
	count numberOfSubsets() const;

	/**
	 * Get the current number of elements in this cover.
	 *
	 * @return The current number of elements.
	 */
	count numberOfElements() const;

	/**
	 * Add an additional element (node).
	 */
	index extend();

	/**
	 * Get the ids of nonempty subsets.
	 *
	 * @return A set of ids of nonempty subsets.
	 */
	std::set<index> getSubsetIds() const;

	/**
	 * Sets an upper bound for the subset ids that CAN be assigned.
	 *
	 * @param[in] upper highest assigned subset ID + 1
	 */
	void setUpperBound(index upper);

	/**
	 * Iterate over all entries (node, subset ID of node) and execute callback function @a func (lambda closure).
	 *
	 * @param func Takes parameters <code>(node, index)</code>
	 */
	template<typename Callback> void forEntries(Callback func) const;

	/**
	 * Iterate over all entries (node, subset ID of node) in parallel and execute callback function @a func (lambda closure).
	 *
	 * @param func Takes parameters <code>(node, index)</code>
	 */
	template<typename Callback> void parallelForEntries(Callback handle) const;

private:

	index z; //!< maximum element index that can be mapped
	index omega; //!< maximum subset index ever assigned
	std::vector<std::set<index>> data; //!< data container, indexed by element id, containing set of subset ids

	/**
	 * Allocates and returns a new subset id.
	 * Increments omega first, so the first id handed out is omega + 1.
	 */
	inline index newSubsetId() {
		omega++;
		index s = omega;
		return s;
	}
};
// Sequentially visits every element together with the set of subset ids it
// belongs to (elements 0..z inclusive).
template<typename Callback>
inline void Cover::forEntries(Callback handle) const {
	for (index e = 0; e <= this->z; ++e) {
		handle(e, data[e]);
	}
}
// Parallel variant of forEntries: visits elements 0..z inclusive with the
// iteration space split across OpenMP threads. The callback must therefore
// be safe to invoke concurrently. omp_index is used because OpenMP requires
// a signed loop variable on some compilers.
template<typename Callback>
inline void Cover::parallelForEntries(Callback handle) const {
#pragma omp parallel for
	for (omp_index e = 0; e <= static_cast<omp_index>(this->z); e += 1) {
		handle(e, data[e]);
	}
}
} /* namespace NetworKit */
#endif /* COVER_H_ */
|
par_mgr.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Two-grid system solver
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "par_mgr.h"
#ifdef HYPRE_USING_DSUPERLU
#include "dsuperlu.h"
#endif
/* Create */
/* Allocate an MGR (multigrid reduction) solver object and initialize every
 * field to its documented default. The returned handle is destroyed with
 * hypre_MGRDestroy. */
void *
hypre_MGRCreate()
{
   hypre_ParMGRData  *mgr_data;

   mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST);

   /* block data */
   (mgr_data -> block_size) = 1;
   (mgr_data -> block_num_coarse_indexes) = NULL;
   (mgr_data -> point_marker_array) = NULL;
   (mgr_data -> block_cf_marker) = NULL;

   /* general data */
   (mgr_data -> max_num_coarse_levels) = 10;
   (mgr_data -> A_array) = NULL;
   (mgr_data -> P_array) = NULL;
   (mgr_data -> RT_array) = NULL;
   (mgr_data -> RAP) = NULL;
   (mgr_data -> CF_marker_array) = NULL;
   (mgr_data -> coarse_indices_lvls) = NULL;

   /* F-relaxation solver data */
   (mgr_data -> A_ff_array) = NULL;
   (mgr_data -> F_fine_array) = NULL;
   (mgr_data -> U_fine_array) = NULL;
   (mgr_data -> aff_solver) = NULL;
   (mgr_data -> fine_grid_solver_setup) = NULL;
   (mgr_data -> fine_grid_solver_solve) = NULL;

   (mgr_data -> F_array) = NULL;
   (mgr_data -> U_array) = NULL;
   (mgr_data -> residual) = NULL;
   (mgr_data -> rel_res_norms) = NULL;
   (mgr_data -> Vtemp) = NULL;
   (mgr_data -> Ztemp) = NULL;
   (mgr_data -> Utemp) = NULL;
   (mgr_data -> Ftemp) = NULL;

   /* solve-phase controls */
   (mgr_data -> num_iterations) = 0;
   (mgr_data -> num_interp_sweeps) = 1;
   (mgr_data -> num_restrict_sweeps) = 1;
   (mgr_data -> trunc_factor) = 0.0;
   (mgr_data -> max_row_sum) = 0.9;
   (mgr_data -> strong_threshold) = 0.25;
   (mgr_data -> P_max_elmts) = 0;

   /* coarse-grid solver (BoomerAMG by default, see use_default_cgrid_solver) */
   (mgr_data -> coarse_grid_solver) = NULL;
   (mgr_data -> coarse_grid_solver_setup) = NULL;
   (mgr_data -> coarse_grid_solver_solve) = NULL;

   (mgr_data -> global_smoother) = NULL;

   (mgr_data -> use_default_cgrid_solver) = 1;
   (mgr_data -> use_default_fsolver) = -1; // set to -1 to avoid printing when not used
   (mgr_data -> omega) = 1.;
   (mgr_data -> max_iter) = 20;
   (mgr_data -> tol) = 1.0e-6;
   (mgr_data -> relax_type) = 0;
   (mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms.
   (mgr_data -> interp_type) = NULL;
   (mgr_data -> restrict_type) = NULL;
   (mgr_data -> num_relax_sweeps) = 1;
   (mgr_data -> relax_weight) = 1.0;

   /* output / logging */
   (mgr_data -> logging) = 0;
   (mgr_data -> print_level) = 0;
   (mgr_data -> frelax_print_level) = 0;
   (mgr_data -> cg_print_level) = 0;

   (mgr_data -> l1_norms) = NULL;

   /* reserved coarse nodes */
   (mgr_data -> reserved_coarse_size) = 0;
   (mgr_data -> reserved_coarse_indexes) = NULL;
   (mgr_data -> reserved_Cpoint_local_indexes) = NULL;

   (mgr_data -> diaginv) = NULL;
   (mgr_data -> global_smooth_iters) = 1;
   (mgr_data -> global_smooth_type) = 0;

   (mgr_data -> set_non_Cpoints_to_F) = 0;
   (mgr_data -> idx_array) = NULL;

   /* F-relaxation V-cycle data */
   (mgr_data -> Frelax_method) = NULL;
   (mgr_data -> VcycleRelaxVtemp) = NULL;
   (mgr_data -> VcycleRelaxZtemp) = NULL;
   (mgr_data -> FrelaxVcycleData) = NULL;
   (mgr_data -> Frelax_num_functions) = NULL;
   (mgr_data -> max_local_lvls) = 10;

   (mgr_data -> use_non_galerkin_cg) = NULL;

   (mgr_data -> print_coarse_system) = 0;
   (mgr_data -> set_c_points_method) = 0;
   (mgr_data -> lvl_to_keep_cpoints) = 0;
   (mgr_data -> cg_convergence_factor) = 0.0;

   (mgr_data -> truncate_coarse_grid_threshold) = 0.0;

   return (void *) mgr_data;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Destroy */
/* Release an MGR solver object created by hypre_MGRCreate, freeing every
 * owned array, vector, matrix and sub-solver in setup order. Safe to call
 * on partially set-up objects: each member is tested before being freed. */
HYPRE_Int
hypre_MGRDestroy( void *data )
{
   hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data;

   HYPRE_Int i;
   HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels);

   /* block info data */
   if ((mgr_data -> block_cf_marker))
   {
      for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
         }
      }
      hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }

   if (mgr_data -> block_num_coarse_indexes)
   {
      hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* final residual vector */
   if ((mgr_data -> residual))
   {
      hypre_ParVectorDestroy( (mgr_data -> residual) );
      (mgr_data -> residual) = NULL;
   }
   if ((mgr_data -> rel_res_norms))
   {
      hypre_TFree( (mgr_data -> rel_res_norms), HYPRE_MEMORY_HOST);
      (mgr_data -> rel_res_norms) = NULL;
   }

   /* temp vectors for solve phase */
   if ((mgr_data -> Vtemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Vtemp) );
      (mgr_data -> Vtemp) = NULL;
   }
   if ((mgr_data -> Ztemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Ztemp) );
      (mgr_data -> Ztemp) = NULL;
   }
   if ((mgr_data -> Utemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Utemp) );
      (mgr_data -> Utemp) = NULL;
   }
   if ((mgr_data -> Ftemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Ftemp) );
      (mgr_data -> Ftemp) = NULL;
   }

   /* coarse grid solver (owned only when the default was created) */
   if ((mgr_data -> use_default_cgrid_solver))
   {
      if ((mgr_data -> coarse_grid_solver))
      {
         hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) );
      }
      (mgr_data -> coarse_grid_solver) = NULL;
   }

   /* l1_norms */
   if ((mgr_data -> l1_norms))
   {
      for (i = 0; i < (num_coarse_levels); i++)
      {
         hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]);
      }
      hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST);
   }

   /* coarse_indices_lvls */
   if ((mgr_data -> coarse_indices_lvls))
   {
      for (i = 0; i < (num_coarse_levels); i++)
         if ((mgr_data -> coarse_indices_lvls)[i])
         {
            hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST);
         }
      hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST);
   }

   /* linear system and cf marker array.
    * Level 0 entries are not freed here: A_array[0] (and the fine-grid
    * F/U vectors) are owned by the caller. */
   if (mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array ||
       mgr_data -> CF_marker_array)
   {
      for (i = 1; i < num_coarse_levels + 1; i++)
      {
         hypre_ParVectorDestroy((mgr_data -> F_array)[i]);
         hypre_ParVectorDestroy((mgr_data -> U_array)[i]);

         if ((mgr_data -> P_array)[i - 1])
         {
            hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i - 1]);
         }

         if ((mgr_data -> RT_array)[i - 1])
         {
            hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i - 1]);
         }

         hypre_IntArrayDestroy(mgr_data -> CF_marker_array[i - 1]);
      }
      for (i = 1; i < (num_coarse_levels); i++)
      {
         if ((mgr_data -> A_array)[i])
         {
            hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]);
         }
      }
   }

   /* AMG for Frelax */
   if (mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array)
   {
      for (i = 1; i < num_coarse_levels + 1; i++)
      {
         if (mgr_data -> F_fine_array[i])
         {
            hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]);
         }
         if (mgr_data -> U_fine_array[i])
         {
            hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]);
         }
      }
      for (i = 1; i < (num_coarse_levels); i++)
      {
         if ((mgr_data -> A_ff_array)[i])
         {
            hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]);
         }
      }
      /* NOTE(review): use_default_fsolver is initialized to -1, which is
       * also truthy; level-0 entries are presumably only allocated in that
       * default path — confirm against the setup code. */
      if (mgr_data -> use_default_fsolver)
      {
         hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]);
      }
      hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST);
      (mgr_data -> F_fine_array) = NULL;
      hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST);
      (mgr_data -> U_fine_array) = NULL;
      hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST);
      (mgr_data -> A_ff_array) = NULL;
   }

   if (mgr_data -> aff_solver)
   {
      for (i = 1; i < (num_coarse_levels); i++)
      {
         if ((mgr_data -> aff_solver)[i])
         {
            hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]);
         }
      }
      if (mgr_data -> use_default_fsolver)
      {
         if ((mgr_data -> aff_solver)[0])
         {
            hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]);
         }
      }
      hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST);
      (mgr_data -> aff_solver) = NULL;
   }

   if ((mgr_data -> F_array))
   {
      hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST);
      (mgr_data -> F_array) = NULL;
   }
   if ((mgr_data -> U_array))
   {
      hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST);
      (mgr_data -> U_array) = NULL;
   }
   if ((mgr_data -> A_array))
   {
      hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST);
      (mgr_data -> A_array) = NULL;
   }
   if ((mgr_data -> P_array))
   {
      hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST);
      (mgr_data -> P_array) = NULL;
   }
   if ((mgr_data -> RT_array))
   {
      hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST);
      (mgr_data -> RT_array) = NULL;
   }
   if ((mgr_data -> CF_marker_array))
   {
      hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST);
      (mgr_data -> CF_marker_array) = NULL;
   }
   if ((mgr_data -> reserved_Cpoint_local_indexes))
   {
      hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_Cpoint_local_indexes) = NULL;
   }
   if (mgr_data -> restrict_type)
   {
      hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST);
      (mgr_data -> restrict_type) = NULL;
   }
   if (mgr_data -> interp_type)
   {
      hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST);
      (mgr_data -> interp_type) = NULL;
   }
   /* Frelax_method */
   if (mgr_data -> Frelax_method)
   {
      hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST);
      (mgr_data -> Frelax_method) = NULL;
   }
   /* Frelax_num_functions */
   if (mgr_data -> Frelax_num_functions)
   {
      hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST);
      (mgr_data -> Frelax_num_functions) = NULL;
   }
   /* data for V-cycle F-relaxation */
   if ((mgr_data -> VcycleRelaxVtemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) );
      (mgr_data -> VcycleRelaxVtemp) = NULL;
   }
   if ((mgr_data -> VcycleRelaxZtemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) );
      (mgr_data -> VcycleRelaxZtemp) = NULL;
   }
   if (mgr_data -> FrelaxVcycleData)
   {
      for (i = 0; i < num_coarse_levels; i++)
      {
         if ((mgr_data -> FrelaxVcycleData)[i])
         {
            hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]);
            (mgr_data -> FrelaxVcycleData)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST);
      (mgr_data -> FrelaxVcycleData) = NULL;
   }
   /* data for reserved coarse nodes */
   if (mgr_data -> reserved_coarse_indexes)
   {
      hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_coarse_indexes) = NULL;
   }
   /* index array for setting Cpoints by global block */
   if ((mgr_data -> set_c_points_method) == 1)
   {
      hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST);
      (mgr_data -> idx_array) = NULL;
   }
   /* array for setting option to use non-Galerkin coarse grid */
   if (mgr_data -> use_non_galerkin_cg)
   {
      hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST);
      (mgr_data -> use_non_galerkin_cg) = NULL;
   }
   /* coarse level matrix - RAP */
   if ((mgr_data -> RAP))
   {
      hypre_ParCSRMatrixDestroy((mgr_data -> RAP));
   }
   if ((mgr_data -> diaginv))
   {
      hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST);
   }
   /* global smoother: only the Euclid (8) and ILU (16) types own a solver
    * object that must be destroyed here. */
   if ((mgr_data -> global_smoother))
   {
      if (mgr_data -> global_smooth_type == 8)
      {
         HYPRE_EuclidDestroy((mgr_data -> global_smoother));
      }
      else if (mgr_data -> global_smooth_type == 16)
      {
         HYPRE_ILUDestroy((mgr_data -> global_smoother));
      }
   }
   /* mgr data */
   hypre_TFree(mgr_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/* Create data for V-cycle F-relaxation */
/*
 * Create and initialize the AMG data object used for the V-cycle
 * F-relaxation inside MGR.
 *
 * Returns an opaque pointer to a freshly allocated hypre_ParAMGData
 * structure. The caller owns it and releases it with
 * hypre_MGRDestroyFrelaxVcycleData().
 *
 * Note: declared with (void) — the old empty parameter list () means
 * "unspecified arguments" in pre-C23 C and defeats prototype checking.
 */
void *
hypre_MGRCreateFrelaxVcycleData(void)
{
   hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST);

   /* No hierarchy built yet: all per-level arrays and work objects start empty. */
   hypre_ParAMGDataAArray(vdata) = NULL;
   hypre_ParAMGDataPArray(vdata) = NULL;
   hypre_ParAMGDataFArray(vdata) = NULL;
   hypre_ParAMGDataCFMarkerArray(vdata) = NULL;
   hypre_ParAMGDataVtemp(vdata) = NULL;
   hypre_ParAMGDataAMat(vdata) = NULL;
   hypre_ParAMGDataBVec(vdata) = NULL;
   hypre_ParAMGDataZtemp(vdata) = NULL;
   hypre_ParAMGDataCommInfo(vdata) = NULL;
   hypre_ParAMGDataUArray(vdata) = NULL;
   hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL;
   hypre_ParAMGDataNumLevels(vdata) = 0;

   /* Solver defaults for the F-relaxation V-cycle. */
   hypre_ParAMGDataMaxLevels(vdata) = 10;
   hypre_ParAMGDataNumFunctions(vdata) = 1;
   hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0;
   hypre_ParAMGDataRelaxOrder(vdata) = 1;
   hypre_ParAMGDataMaxCoarseSize(vdata) = 9;
   hypre_ParAMGDataMinCoarseSize(vdata) = 0;
   hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9;

   return (void *) vdata;
}
/* Destroy data for V-cycle F-relaxation */
/*
 * Release the AMG data object created by hypre_MGRCreateFrelaxVcycleData().
 *
 * data - opaque pointer to a hypre_ParAMGData object; ownership is taken
 *        and the object is freed.
 *
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_MGRDestroyFrelaxVcycleData( void *data )
{
hypre_ParAMGData * vdata = (hypre_ParAMGData*) data;
HYPRE_Int i;
HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata);
MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata);
/* NOTE(review): DofFuncArray is dereferenced at [0] without a NULL check —
   presumably setup always allocates it before destroy is called; confirm. */
hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST);
/* Loop starts at level 1: the level-0 operator and vectors appear to be
   owned by the enclosing MGR hierarchy, not by this object. */
for (i = 1; i < num_levels + 1; i++)
{
if (hypre_ParAMGDataAArray(vdata)[i])
{
hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]);
}
if (hypre_ParAMGDataPArray(vdata)[i - 1])
{
hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i - 1]);
}
hypre_IntArrayDestroy(hypre_ParAMGDataCFMarkerArray(vdata)[i - 1]);
hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]);
hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]);
hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST);
}
/* With zero levels the loop above never runs; free the lone CF marker here. */
if (num_levels < 1)
{
hypre_IntArrayDestroy(hypre_ParAMGDataCFMarkerArray(vdata)[0]);
}
/* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */
//hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata));
hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST);
//hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST);
/* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */
/*
if (hypre_ParAMGDataZtemp(vdata))
hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata));
*/
if (hypre_ParAMGDataAMat(vdata)) { hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST); }
if (hypre_ParAMGDataBVec(vdata)) { hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST); }
if (hypre_ParAMGDataCommInfo(vdata)) { hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST); }
/* Duplicated communicator (if any) must be freed through MPI, not hypre_TFree. */
if (new_comm != hypre_MPI_COMM_NULL)
{
hypre_MPI_Comm_free (&new_comm);
}
hypre_TFree(vdata, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/* Set C-point variables for each reduction level */
/* Currently not implemented */
/*
 * Record the C-point counts and index lists for each reduction level.
 * (Placeholder API — the stored data is not yet consumed.)
 * The caller retains ownership of both arrays; only the pointers are kept.
 */
HYPRE_Int
hypre_MGRSetReductionLevelCpoints( void *mgr_vdata,
                                   HYPRE_Int nlevels,
                                   HYPRE_Int *num_coarse_points,
                                   HYPRE_Int **level_coarse_indexes)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;

   mgr->num_coarse_levels = nlevels;
   mgr->num_coarse_per_level = num_coarse_points;
   mgr->level_coarse_indexes = level_coarse_indexes;

   return hypre_error_flag;
}
/* Initialize some data */
/* Set whether non-coarse points on each level should be explicitly tagged as F-points */
/*
 * Toggle whether non-coarse points on each level are explicitly tagged
 * as F-points during setup.
 */
HYPRE_Int
hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;

   mgr->set_non_Cpoints_to_F = nonCptToFptFlag;

   return hypre_error_flag;
}
/* Set whether the reserved C points are reduced before the coarse grid solve */
/*
 * Choose the level down to which reserved C-points are kept before the
 * coarse-grid solve.
 */
HYPRE_Int
hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;

   mgr->lvl_to_keep_cpoints = level;

   return hypre_error_flag;
}
/* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ... */
/*
 * Define C-points for contiguously blocked unknowns
 * (p1..pn, s1..sn, ...). Delegates the per-block CF marker setup to
 * hypre_MGRSetCpointsByBlock, then records the block starting indices
 * and selects method 1.
 *
 * begin_idx_array may be NULL, in which case the stored starting
 * indices are all zero (hypre_CTAlloc zero-fills).
 */
HYPRE_Int
hypre_MGRSetCpointsByContiguousBlock( void *mgr_vdata,
                                      HYPRE_Int block_size,
                                      HYPRE_Int max_num_levels,
                                      HYPRE_BigInt *begin_idx_array,
                                      HYPRE_Int *block_num_coarse_points,
                                      HYPRE_Int **block_coarse_indexes)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   HYPRE_Int k;

   /* Drop any block-start index array left from a previous setup. */
   if (mgr->idx_array != NULL)
   {
      hypre_TFree(mgr->idx_array, HYPRE_MEMORY_HOST);
      mgr->idx_array = NULL;
   }

   /* Take a private copy of the caller's block starting indices. */
   HYPRE_BigInt *starts = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST);
   if (begin_idx_array != NULL)
   {
      for (k = 0; k < block_size; k++)
      {
         starts[k] = begin_idx_array[k];
      }
   }

   /* Shared block setup, then the contiguous-block specifics. */
   hypre_MGRSetCpointsByBlock(mgr, block_size, max_num_levels, block_num_coarse_points,
                              block_coarse_indexes);
   mgr->idx_array = starts;
   mgr->set_c_points_method = 1;

   return hypre_error_flag;
}
/* Initialize/ set local block data information */
/*
 * Set the per-level CF splitting from local block information.
 *
 * mgr_vdata               - MGR solver object (hypre_ParMGRData*)
 * block_size              - number of unknowns per block (e.g. per grid point)
 * max_num_levels          - number of MGR coarsening levels to configure
 * block_num_coarse_points - per level, how many block entries are C-points
 * block_coarse_indexes    - per level, the block-local indices of C-points
 *
 * Copies the coarse-point layout into freshly allocated arrays owned by
 * the MGR object (the caller keeps ownership of its inputs), replacing
 * any layout from a previous call, and selects method 0.
 *
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_MGRSetCpointsByBlock( void *mgr_vdata,
                            HYPRE_Int block_size,
                            HYPRE_Int max_num_levels,
                            HYPRE_Int *block_num_coarse_points,
                            HYPRE_Int **block_coarse_indexes)
{
   HYPRE_Int i, j;
   HYPRE_Int **block_cf_marker = NULL;
   HYPRE_Int *block_num_coarse_indexes = NULL;
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   /* Free block cf_marker data if not previously destroyed. */
   if ((mgr_data -> block_cf_marker) != NULL)
   {
      for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
            (mgr_data -> block_cf_marker)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if ((mgr_data -> block_num_coarse_indexes))
   {
      hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* Store block cf_marker: default every entry to F, then mark C-points. */
   block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_levels; i++)
   {
      block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
      /* Explicit loop instead of memset: memset fills bytes, which is only
         correct for int values whose bytes are all equal (e.g. 0 or -1). */
      for (j = 0; j < block_size; j++)
      {
         block_cf_marker[i][j] = FMRK;
      }
   }
   for (i = 0; i < max_num_levels; i++)
   {
      for (j = 0; j < block_num_coarse_points[i]; j++)
      {
         (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK;
      }
   }

   /* Store block_num_coarse_points. */
   if (max_num_levels > 0)
   {
      block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
      for (i = 0; i < max_num_levels; i++)
      {
         block_num_coarse_indexes[i] = block_num_coarse_points[i];
      }
   }

   /* Set block data. */
   (mgr_data -> max_num_coarse_levels) = max_num_levels;
   (mgr_data -> block_size) = block_size;
   (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
   (mgr_data -> block_cf_marker) = block_cf_marker;
   (mgr_data -> set_c_points_method) = 0;

   return hypre_error_flag;
}
/*
 * Set the per-level CF splitting from a point-marker array.
 *
 * Unlike hypre_MGRSetCpointsByBlock, the per-level marker values are taken
 * directly from lvl_coarse_indexes (not forced to CMRK), and the
 * point_marker_array pointer is recorded for matching points at setup;
 * the caller keeps ownership of point_marker_array. Selects method 2.
 *
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_MGRSetCpointsByPointMarkerArray( void *mgr_vdata,
                                       HYPRE_Int block_size,
                                       HYPRE_Int max_num_levels,
                                       HYPRE_Int *lvl_num_coarse_points,
                                       HYPRE_Int **lvl_coarse_indexes,
                                       HYPRE_Int *point_marker_array)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i, j;
   HYPRE_Int **block_cf_marker = NULL;
   HYPRE_Int *block_num_coarse_indexes = NULL;

   /* Free block cf_marker data if not previously destroyed. */
   if ((mgr_data -> block_cf_marker) != NULL)
   {
      for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
            (mgr_data -> block_cf_marker)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if ((mgr_data -> block_num_coarse_indexes))
   {
      hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* Store block cf_marker: default every entry to F, then copy markers. */
   block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_levels; i++)
   {
      block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
      /* Explicit loop instead of memset: memset fills bytes, which is only
         correct for int values whose bytes are all equal (e.g. 0 or -1). */
      for (j = 0; j < block_size; j++)
      {
         block_cf_marker[i][j] = FMRK;
      }
   }
   for (i = 0; i < max_num_levels; i++)
   {
      for (j = 0; j < lvl_num_coarse_points[i]; j++)
      {
         block_cf_marker[i][j] = lvl_coarse_indexes[i][j];
      }
   }

   /* Store block_num_coarse_points. */
   if (max_num_levels > 0)
   {
      block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
      for (i = 0; i < max_num_levels; i++)
      {
         block_num_coarse_indexes[i] = lvl_num_coarse_points[i];
      }
   }

   /* Set block data. */
   (mgr_data -> max_num_coarse_levels) = max_num_levels;
   (mgr_data -> block_size) = block_size;
   (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
   (mgr_data -> block_cf_marker) = block_cf_marker;
   (mgr_data -> point_marker_array) = point_marker_array;
   (mgr_data -> set_c_points_method) = 2;

   return hypre_error_flag;
}
/*Set number of points that remain part of the coarse grid throughout the hierarchy */
/*
 * Register the global indices of nodes that stay in the coarse grid
 * throughout the MGR hierarchy.
 *
 * A private copy of reserved_cpt_index is stored (caller keeps its
 * array); any previously stored copy is released first. A negative
 * reserved_coarse_size is rejected via hypre_error_in_arg.
 */
HYPRE_Int
hypre_MGRSetReservedCoarseNodes(void *mgr_vdata,
                                HYPRE_Int reserved_coarse_size,
                                HYPRE_BigInt *reserved_cpt_index)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   HYPRE_BigInt *indexes_copy = NULL;
   HYPRE_Int k;

   if (!mgr)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! MGR object empty!\n");
      return hypre_error_flag;
   }

   if (reserved_coarse_size < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   /* Release indices stored by an earlier call before replacing them. */
   if (mgr->reserved_coarse_indexes)
   {
      hypre_TFree(mgr->reserved_coarse_indexes, HYPRE_MEMORY_HOST);
      mgr->reserved_coarse_indexes = NULL;
   }

   /* Copy the caller's reserved coarse node indices. */
   if (reserved_coarse_size > 0)
   {
      indexes_copy = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST);
      for (k = 0; k < reserved_coarse_size; k++)
      {
         indexes_copy[k] = reserved_cpt_index[k];
      }
   }

   mgr->reserved_coarse_size = reserved_coarse_size;
   mgr->reserved_coarse_indexes = indexes_copy;

   return hypre_error_flag;
}
/* Set CF marker array */
/*
 * Produce the CF splitting for one MGR level.
 *
 * S                    - strength matrix passed to the AMG coarsening routine
 * A                    - level matrix (local row count taken from its diag part)
 * fixed_coarse_size    - number of prescribed (fixed) coarse points
 * fixed_coarse_indexes - local indices of the fixed coarse points
 * debug_flag           - forwarded to hypre_BoomerAMGCoarsen
 * CF_marker_ptr        - in/out: CF marker array (CMRK = C, FMRK = F);
 *                        replaced/allocated as needed
 * cflag                - nonzero: last level, coarsen exactly onto the
 *                        fixed coarse set; zero: run AMG coarsening first,
 *                        then force the fixed set to C.
 *
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_MGRCoarsen(hypre_ParCSRMatrix *S,
hypre_ParCSRMatrix *A,
HYPRE_Int fixed_coarse_size,
HYPRE_Int *fixed_coarse_indexes,
HYPRE_Int debug_flag,
hypre_IntArray **CF_marker_ptr,
HYPRE_Int cflag)
{
HYPRE_Int *CF_marker = NULL;
HYPRE_Int *cindexes = fixed_coarse_indexes;
HYPRE_Int i, row, nc;
HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
/* If this is the last level, coarsen onto fixed coarse set */
if (cflag)
{
/* Replace any existing marker with a fresh all-F array. */
if (*CF_marker_ptr != NULL)
{
hypre_IntArrayDestroy(*CF_marker_ptr);
}
*CF_marker_ptr = hypre_IntArrayCreate(nloc);
hypre_IntArrayInitialize(*CF_marker_ptr);
hypre_IntArraySetConstantValues(*CF_marker_ptr, FMRK);
CF_marker = hypre_IntArrayData(*CF_marker_ptr);
/* first mark fixed coarse set */
nc = fixed_coarse_size;
for (i = 0; i < nc; i++)
{
CF_marker[cindexes[i]] = CMRK;
}
}
else
{
/* First coarsen to get initial CF splitting.
* This is then followed by updating the CF marker to pass
* coarse information to the next levels. NOTE: It may be
* convenient to implement this way (allows the use of multiple
* coarsening strategies without changing too much code),
* but not necessarily the best option, compared to initializing
* CF_marker first and then coarsening on subgraph which excludes
* the initialized coarse nodes.
*/
hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, CF_marker_ptr);
CF_marker = hypre_IntArrayData(*CF_marker_ptr);
/* Update CF_marker to correct Cpoints marked as Fpoints. */
nc = fixed_coarse_size;
for (i = 0; i < nc; i++)
{
CF_marker[cindexes[i]] = CMRK;
}
/* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
* between type of F-points (example Ruge coarsening). We do not need that distinction here.
*/
for (row = 0; row < nloc; row++)
{
if (CF_marker[row] == CMRK) { continue; }
CF_marker[row] = FMRK;
}
/* NOTE(review): the disabled draft below references identifiers that are
not declared in this function (index_i, cf_marker, S_CMRK, last_level);
it would not compile if enabled as-is. */
#if 0
/* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points
* in the next level.
*/
nc = 0;
index_i = 0;
for (row = 0; row < nloc; row++)
{
/* loop through new c-points */
if (CF_marker[row] == CMRK) { nc++; }
else if (CF_marker[row] == S_CMRK)
{
/* previously marked c-point is part of fixed coarse set. Track its current local index */
cindexes[index_i++] = nc;
/* reset c-point from S_CMRK to CMRK */
cf_marker[row] = CMRK;
nc++;
}
/* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
* between type of F-points (example Ruge coarsening). We do not need that distinction here.
*/
else
{
CF_marker[row] = FMRK;
}
}
/* check if this should be last level */
if ( nc == fixed_coarse_size)
{
last_level = 1;
}
//printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size);
#endif
}
return hypre_error_flag;
}
/* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */
/*
 * Build the MGR interpolation operator P (adapted from BoomerAMGBuildInterp).
 *
 * A               - fine-level matrix
 * CF_marker       - local CF splitting (>= 0 means C-point)
 * num_cpts_global - per-rank global C-point partition; entry [1] on the
 *                   last rank holds the global C-point count
 * method          - interpolation weights for F-rows:
 *                     0: injection (F-rows are empty, C-rows identity)
 *                     1: P entry = -a_ij for C-neighbors
 *                     2: P entry = -a_ij / a_ii (diagonally scaled)
 * debug_flag      - timing printout when == 4 (negative values are negated)
 * P_ptr           - out: the assembled hypre_ParCSRMatrix
 *
 * Two passes over the fine grid: pass 1 counts nonzeros per thread chunk
 * and builds fine_to_coarse; pass 2 fills P. Threading is currently
 * disabled (num_threads forced to 1), but the chunked bookkeeping is kept.
 */
HYPRE_Int
hypre_MGRBuildP( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int method,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Real *a_diag;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
// HYPRE_Int jj_begin_row,jj_begin_row_offd;
// HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_BigInt *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i, i1;
HYPRE_Int j, jl, jj;
HYPRE_Int start;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
//num_threads = hypre_NumThreads();
// Temporary fix, disable threading
// TODO: enable threading
num_threads = 1;
//my_first_cpt = num_cpts_global[0];
/* Last rank owns the global C-point count; broadcast it to all ranks. */
if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
}
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); }
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
/* Pack the CF markers of locally owned rows that neighbors need. */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
{
int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
*  First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
*  Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
*  Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
/* Split rows [0, n_fine) into a contiguous chunk [ns, ne) per thread;
the first `rest` chunks get one extra row. */
size = n_fine / num_threads;
rest = n_fine - size * num_threads;
if (j < rest)
{
ns = j * size + j;
ne = (j + 1) * size + j + 1;
}
else
{
ns = j * size + rest;
ne = (j + 1) * size + rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
*  If i is a C-point, interpolation is the identity. Also set up
*  mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
*  If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
*--------------------------------------------------------------------*/
else
{
/* method == 0 (injection) contributes no F-row entries. */
for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
{
i1 = A_diag_j[jj];
if ((CF_marker[i1] >= 0) && (method > 0))
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
if ((CF_marker_offd[i1] >= 0) && (method > 0))
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
*  Allocate  arrays.
*-----------------------------------------------------------------------*/
/* Prefix-sum the per-thread counters so each chunk knows its offsets. */
for (i = 0; i < num_threads - 1; i++)
{
coarse_counter[i + 1] += coarse_counter[i];
jj_count[i + 1] += jj_count[i];
jj_count_offd[i + 1] += jj_count_offd[i];
}
i = num_threads - 1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
*  Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
*  Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt,  num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
/* Shift each chunk's local coarse indices by the coarse count of all
preceding chunks to make fine_to_coarse globally consistent per rank. */
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) { coarse_shift = coarse_counter[j - 1]; }
size = n_fine / num_threads;
rest = n_fine - size * num_threads;
if (j < rest)
{
ns = j * size + j;
ne = (j + 1) * size + j + 1;
}
else
{
ns = j * size + rest;
ne = (j + 1) * size + rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
}
/* index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
big_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt;
}
comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
*/
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
//for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
*  Loop over fine grid points.
*-----------------------------------------------------------------------*/
/* Cache the reciprocal diagonal of F-rows for method 2 scaling. */
a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] < 0)
{
for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
{
i1 = A_diag_j[jj];
if ( i == i1 )  /* diagonal of A only */
{
a_diag[i] = 1.0 / A_diag_data[jj];
}
}
}
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
/* Second pass: fill P row by row using the offsets from pass 1. */
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine / num_threads;
rest = n_fine - size * num_threads;
if (jl < rest)
{
ns = jl * size + jl;
ne = (jl + 1) * size + jl + 1;
}
else
{
ns = jl * size + rest;
ne = (jl + 1) * size + rest;
}
jj_counter = 0;
if (jl > 0) { jj_counter = jj_count[jl - 1]; }
jj_counter_offd = 0;
if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
}
else
{
P_marker_offd = NULL;
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
*  If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
*  If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if ((CF_marker[i1] >= 0) && (method > 0))
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
/*
if(method == 0)
{
P_diag_data[jj_counter] = 0.0;
}
*/
if (method == 1)
{
P_diag_data[jj_counter] = - A_diag_data[jj];
}
else if (method == 2)
{
P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i];
}
jj_counter++;
}
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if ((CF_marker_offd[i1] >= 0) && (method > 0))
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd]  = fine_to_coarse_offd[i1];*/
/* Store the local off-diag column; remapped to compressed
column numbering after assembly (see tmp_map_offd below). */
P_offd_j[jj_counter_offd] = i1;
/*
if(method == 0)
{
P_offd_data[jj_counter_offd] = 0.0;
}
*/
if (method == 1)
{
P_offd_data[jj_counter_offd] = - A_offd_data[jj];
}
else if (method == 2)
{
P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i];
}
jj_counter_offd++;
}
}
}
}
P_offd_i[i + 1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
hypre_TFree(a_diag, HYPRE_MEMORY_HOST);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
/* Compress the off-diag column space to only the columns P actually uses,
then remap P_offd_j into that compressed numbering. */
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker[i] = 0;
}
num_cols_P_offd = 0;
for (i = 0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_cols_P_offd; i++)
{
while (P_marker[index] == 0) { index++; }
tmp_map_offd[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
/* Restore special F-marker -3 back to the ordinary F value -1. */
for (i = 0; i < n_fine; i++)
if (CF_marker[i] == -3) { CF_marker[i] = -1; }
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
//hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return (0);
}
/* Interpolation for MGR - Dynamic Row Sum method */
HYPRE_Int
hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int blk_size,
HYPRE_Int reserved_coarse_size,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Real *a_diag;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd;
HYPRE_Int *CF_marker_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
// HYPRE_Int jj_begin_row,jj_begin_row_offd;
// HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_BigInt *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i, i1;
HYPRE_Int j, jl, jj;
HYPRE_Int start;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
//num_threads = hypre_NumThreads();
// Temporary fix, disable threading
// TODO: enable threading
num_threads = 1;
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
}
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); }
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine / num_threads;
rest = n_fine - size * num_threads;
if (j < rest)
{
ns = j * size + j;
ne = (j + 1) * size + j + 1;
}
else
{
ns = j * size + rest;
ne = (j + 1) * size + rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
*--------------------------------------------------------------------*/
else
{
for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
{
i1 = A_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
/*--------------------------------------------------------------------
* Set up the indexes for the DRS method
*--------------------------------------------------------------------*/
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i = 0; i < num_threads - 1; i++)
{
coarse_counter[i + 1] += coarse_counter[i];
jj_count[i + 1] += jj_count[i];
jj_count_offd[i + 1] += jj_count_offd[i];
}
i = num_threads - 1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) { coarse_shift = coarse_counter[j - 1]; }
size = n_fine / num_threads;
rest = n_fine - size * num_threads;
if (j < rest)
{
ns = j * size + j;
ne = (j + 1) * size + j + 1;
}
else
{
ns = j * size + rest;
ne = (j + 1) * size + rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
*/
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
//for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
{
i1 = A_diag_j[jj];
if ( i == i1 ) /* diagonal of A only */
{
a_diag[i] = 1.0 / A_diag_data[jj];
}
}
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine / num_threads;
rest = n_fine - size * num_threads;
if (jl < rest)
{
ns = jl * size + jl;
ne = (jl + 1) * size + jl + 1;
}
else
{
ns = jl * size + rest;
ne = (jl + 1) * size + rest;
}
jj_counter = 0;
if (jl > 0) { jj_counter = jj_count[jl - 1]; }
jj_counter_offd = 0;
if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
}
else
{
P_marker_offd = NULL;
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i];
jj_counter++;
}
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i];
jj_counter_offd++;
}
}
}
}
P_offd_i[i + 1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
hypre_TFree(a_diag, HYPRE_MEMORY_HOST);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker[i] = 0;
}
num_cols_P_offd = 0;
for (i = 0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_cols_P_offd; i++)
{
while (P_marker[index] == 0) { index++; }
tmp_map_offd[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i = 0; i < n_fine; i++)
if (CF_marker[i] == -3) { CF_marker[i] = -1; }
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
// hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return (0);
}
/* Left-scale a ParCSR matrix in place: every nonzero of local row r
 * (in both the diag and offd CSR blocks) is multiplied by vector[r].
 * vector: one scaling factor per local row
 * A:      the target ParCSR matrix (modified in place)
 */
HYPRE_Int
hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector,
                            hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *diag_block  = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd_block  = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *diag_vals   = hypre_CSRMatrixData(diag_block);
   HYPRE_Real      *offd_vals   = hypre_CSRMatrixData(offd_block);
   HYPRE_Int       *diag_rowptr = hypre_CSRMatrixI(diag_block);
   HYPRE_Int       *offd_rowptr = hypre_CSRMatrixI(offd_block);
   HYPRE_Int        num_rows    = hypre_CSRMatrixNumRows(diag_block);
   HYPRE_Int        row, k;

   for (row = 0; row < num_rows; row++)
   {
      const HYPRE_Real scale = vector[row];

      /* Scale the on-processor (diag) part of this row. */
      for (k = diag_rowptr[row]; k < diag_rowptr[row + 1]; k++)
      {
         diag_vals[k] *= scale;
      }
      /* Scale the off-processor (offd) part of this row. */
      for (k = offd_rowptr[row]; k < offd_rowptr[row + 1]; k++)
      {
         offd_vals[k] *= scale;
      }
   }
   return (0);
}
/************************************************************
 * Compute the MGR non-Galerkin coarse grid
 *    A_h = A_cc + A_h_correction,
 * where A_h_correction approximates the Schur-complement term
 * -A_cf * inv(A_ff) * A_fc.
 *
 * Available methods:
 * 0: inv(A_FF) approximated by its diagonal inverse
 * 1: inv(A_FF) approximated by sparse approximate inverse
 *
 * Returns the new coarse-grid matrix through A_h_ptr (caller owns it).
 *************************************************************/
HYPRE_Int
hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A,
                                      hypre_ParCSRMatrix *P,
                                      hypre_ParCSRMatrix *RT,
                                      HYPRE_Int bsize,
                                      HYPRE_Int ordering,
                                      HYPRE_Int method,
                                      HYPRE_Int Pmax,
                                      HYPRE_Int keep_stencil,
                                      HYPRE_Int *CF_marker,
                                      hypre_ParCSRMatrix **A_h_ptr)
{
   HYPRE_Int *c_marker, *f_marker;
   HYPRE_Int n_local_fine_grid, i, i1, jj;
   hypre_ParCSRMatrix *A_cc;
   hypre_ParCSRMatrix *A_ff;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *A_cf;
   hypre_ParCSRMatrix *A_h;
   hypre_ParCSRMatrix *A_h_correction;
   HYPRE_Int max_elmts = Pmax;
   // HYPRE_Real wall_time = 0.;
   hypre_ParCSRMatrix *P_mod = NULL;
   HYPRE_Int my_id;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_MPI_Comm_rank(comm, &my_id);
   HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);
   n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   /* Build complementary C/F selection masks (+1 selects, -1 excludes). */
   c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
   f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_local_fine_grid; i++)
   {
      HYPRE_Int point_type = CF_marker[i];
      hypre_assert(point_type == 1 || point_type == -1);
      c_marker[i] = point_type;
      f_marker[i] = -point_type;
   }
   // get the A_cc sub-block
   hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc);
   if (method == 0)
   {
      if (keep_stencil)
      {
         /* Correction built from diagonals only:
          * A_h_correction = D_cf * D_ff_inv * A_fc (keeps A_fc's stencil). */
         //wall_time = time_getWallclockSeconds();
         hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
         hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc);
         hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff);
         // extract the diagonal of A_ff and compute D_ff_inv (with sign flip)
         hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff);
         HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag);
         HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag);
         HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag);
         HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag);
         HYPRE_Real *D_ff_inv;
         D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST);
         for (i = 0; i < n_local_fpoints; i++)
         {
            for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i + 1]; jj++)
            {
               i1 = A_ff_diag_j[jj];
               if ( i == i1 ) /* diagonal entry of row i */
               {
                  D_ff_inv[i] = -1.0 / A_ff_diag_data[jj];
               }
            }
         }
         // extract the "diagonal" (first stored entry per row) of A_cf
         hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf);
         HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag);
         HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag);
         HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag);
         n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag);
         HYPRE_Real *D_cf;
         D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST);
         for (i = 0; i < n_local_fpoints; i++)
         {
            /* BUG FIX: the original read A_cf_diag_data[jj], where jj was a
             * stale index left over from the D_ff_inv loop above (and i1 was
             * computed but never used). Use the row-start index of row i. */
            jj = A_cf_diag_i[i];
            D_cf[i] = A_cf_diag_data[jj];
         }
         // compute the triple product in place on A_fc
         /* NOTE(review): D_cf holds one entry per local C row while A_fc has
          * F rows; this assumes matching local row counts — verify for the
          * intended block ordering. */
         hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc);
         hypre_ParCSRMatrixLeftScale(D_cf, A_fc);
         A_h_correction = A_fc; /* ownership transferred; freed as A_h_correction */
         hypre_TFree(D_cf, HYPRE_MEMORY_HOST);
         hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST);
         hypre_ParCSRMatrixDestroy(A_ff);
         hypre_ParCSRMatrixDestroy(A_cf);
         //wall_time = time_getWallclockSeconds() - wall_time;
         //hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time);
      }
      else
      {
         /* Galerkin-style triple product RT * A * P_mod, where P_mod is P
          * with the C-point identity entries zeroed out. */
         //wall_time = time_getWallclockSeconds();
         P_mod = hypre_ParCSRMatrixCompleteClone(P);
         hypre_ParCSRMatrixCopy(P, P_mod, 1);
         HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod);
         hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod);
         HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag);
         HYPRE_Real *P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag);
         for (i = 0; i < n_local_rows; i ++)
         {
            if (CF_marker[i] >= 0)
            {
               /* zero the first stored entry of each C row (the identity) */
               HYPRE_Int ii = P_mod_diag_i[i];
               P_mod_diag_data[ii] = 0.0;
            }
         }
         hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction);
         //wall_time = time_getWallclockSeconds() - wall_time;
         //hypre_printf("Compute triple product time new: %1.5f\n", wall_time);
         hypre_ParCSRMatrixDestroy(P_mod);
      }
   }
   else
   {
      // Approximate inverse for ideal interpolation:
      // A_h_correction = A_cf * inv(A_ff) * A_fc
      hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
      hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc);
      hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff);
      hypre_ParCSRMatrix *A_ff_inv = NULL;
      hypre_ParCSRMatrix *minus_Wp = NULL;
      hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
      minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc);
      A_h_correction = hypre_ParMatmul(A_cf, minus_Wp);
      hypre_ParCSRMatrixDestroy(minus_Wp);
      /* BUG FIX: A_ff_inv was leaked in the original code. */
      hypre_ParCSRMatrixDestroy(A_ff_inv);
      hypre_ParCSRMatrixDestroy(A_ff);
      hypre_ParCSRMatrixDestroy(A_fc);
      hypre_ParCSRMatrixDestroy(A_cf);
   }
   // perform dropping for A_h_correction
   // specific to multiphase poromechanics
   // we only keep the diagonal of each block
   //wall_time = time_getWallclockSeconds();
   HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction));
   hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction);
   HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag);
   HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag);
   HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag);
   HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag);
   hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction);
   HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd);
   HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd);
   HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd);
   if (Pmax > 0)
   {
      if (ordering == 0) // interleaved ordering
      {
         /* Rebuild the diag/offd CSR arrays of A_h_correction, keeping for
          * each row: all in-block entries (within its bsize block) plus up
          * to max_elmts of the largest-magnitude out-of-block entries. */
         HYPRE_Int *A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints + 1,
                                                              memory_location);
         HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int,
                                                              (bsize + max_elmts) * n_local_cpoints, memory_location);
         HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex,
                                                                     (bsize + max_elmts) * n_local_cpoints, memory_location);
         HYPRE_Int num_nonzeros_diag_new = 0;
         HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints + 1,
                                                              memory_location);
         HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts * n_local_cpoints,
                                                              memory_location);
         HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex,
                                                                     max_elmts * n_local_cpoints, memory_location);
         HYPRE_Int num_nonzeros_offd_new = 0;
         for (i = 0; i < n_local_cpoints; i++)
         {
            HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i + 1] - A_h_correction_diag_i[i] +
                                         A_h_correction_offd_i[i + 1] - A_h_correction_offd_i[i];
            HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST);
            HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST);
            HYPRE_Int row_start = i - (i % bsize); /* first row of this bsize block */
            HYPRE_Int row_stop = row_start + bsize - 1;
            HYPRE_Int cnt = 0;
            /* Gather offd entries (shifted past the diag column range) and
             * diag entries into one auxiliary list. */
            for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i + 1]; jj++)
            {
               aux_j[cnt] = A_h_correction_offd_j[jj] + ncol_diag;
               aux_data[cnt] = A_h_correction_offd_data[jj];
               cnt++;
            }
            for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i + 1]; jj++)
            {
               aux_j[cnt] = A_h_correction_diag_j[jj];
               aux_data[cnt] = A_h_correction_diag_data[jj];
               cnt++;
            }
            hypre_qsort2_abs(aux_j, aux_data, 0, cnt - 1);
            /* Always keep the in-block diag entries. */
            for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i + 1]; jj++)
            {
               i1 = A_h_correction_diag_j[jj];
               if (i1 >= row_start && i1 <= row_stop)
               {
                  // copy data to new arrays
                  A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1;
                  A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj];
                  ++num_nonzeros_diag_new;
               }
               else
               {
                  // Do nothing
               }
            }
            /* Keep up to max_elmts additional out-of-block entries. */
            if (max_elmts > 0)
            {
               for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++)
               {
                  HYPRE_Int col_idx = aux_j[jj];
                  HYPRE_Real col_value = aux_data[jj];
                  if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop))
                  {
                     A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx;
                     A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value;
                     ++num_nonzeros_diag_new;
                  }
                  else if (col_idx >= ncol_diag)
                  {
                     A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag;
                     A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value;
                     ++num_nonzeros_offd_new;
                  }
               }
            }
            A_h_correction_diag_i_new[i + 1] = num_nonzeros_diag_new;
            A_h_correction_offd_i_new[i + 1] = num_nonzeros_offd_new;
            hypre_TFree(aux_j, HYPRE_MEMORY_HOST);
            hypre_TFree(aux_data, HYPRE_MEMORY_HOST);
         }
         /* Swap the filtered arrays into A_h_correction. */
         hypre_TFree(A_h_correction_diag_i, memory_location);
         hypre_TFree(A_h_correction_diag_j, memory_location);
         hypre_TFree(A_h_correction_diag_data, memory_location);
         hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new;
         hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new;
         hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new;
         hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new;
         hypre_TFree(A_h_correction_offd_i, memory_location);
         hypre_TFree(A_h_correction_offd_j, memory_location);
         hypre_TFree(A_h_correction_offd_data, memory_location);
         hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new;
         hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new;
         hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new;
         hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new;
      }
      else
      {
         hypre_printf("Error!! Block ordering for non-Galerkin coarse grid is not currently supported\n");
         exit(-1);
      }
   }
   //hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts);
   //wall_time = time_getWallclockSeconds() - wall_time;
   //hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time);
   //hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered");
   // coarse grid / schur complement
   hypre_ParCSRMatrixAdd(1.0, A_cc, 1.0, A_h_correction, &A_h);
   *A_h_ptr = A_h;
   //hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h");
   hypre_ParCSRMatrixDestroy(A_cc);
   hypre_ParCSRMatrixDestroy(A_h_correction);
   hypre_TFree(c_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(f_marker, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/* Compute algebraic fixed-stress quantities for a 3-block (U/S/P) MGR
 * splitting. Extracts the A_up, A_uu, A_su, A_pu sub-blocks, solves
 * e3 = inv(A_uu) * (A_up * 1) with the supplied AMG solver, and prints
 * the products A_su*e3 ("Dsp") and A_pu*e3 ("Dpp") to IJ files.
 *
 * mgr_idx_array: global start indices of the U/S/P blocks; entry 0 must
 *                equal this rank's first row index.
 * A_ff_solver:   BoomerAMG solver set up for A_uu.
 */
HYPRE_Int
hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A,
                                     HYPRE_BigInt *mgr_idx_array,
                                     HYPRE_Solver A_ff_solver)
{
   HYPRE_Int *U_marker, *S_marker, *P_marker;
   HYPRE_Int n_fine, i;
   HYPRE_BigInt ibegin;
   hypre_ParCSRMatrix *A_up;
   hypre_ParCSRMatrix *A_uu;
   hypre_ParCSRMatrix *A_su;
   hypre_ParCSRMatrix *A_pu;
   hypre_ParVector *e1_vector;
   hypre_ParVector *e2_vector;
   hypre_ParVector *e3_vector;
   hypre_ParVector *e4_vector;
   hypre_ParVector *e5_vector;
   n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   ibegin = hypre_ParCSRMatrixFirstRowIndex(A);
   hypre_assert(ibegin == mgr_idx_array[0]);
   U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      U_marker[i] = -1;
      S_marker[i] = -1;
      P_marker[i] = -1;
   }
   // create U/S/P block markers from the global index ranges
   for (i = 0; i < n_fine; i++)
   {
      if (i < mgr_idx_array[1] - ibegin)
      {
         U_marker[i] = 1;
      }
      else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin))
      {
         S_marker[i] = 1;
      }
      else
      {
         P_marker[i] = 1;
      }
   }
   // Get A_up
   hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up);
   // Get A_uu
   hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu);
   // Get A_su
   hypre_MGRGetSubBlock(A, S_marker, U_marker, 0, &A_su);
   // Get A_pu
   hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu);
   e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up),
                                     hypre_ParCSRMatrixGlobalNumCols(A_up),
                                     hypre_ParCSRMatrixColStarts(A_up));
   hypre_ParVectorInitialize(e1_vector);
   hypre_ParVectorSetConstantValues(e1_vector, 1.0);
   e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu),
                                     hypre_ParCSRMatrixGlobalNumRows(A_uu),
                                     hypre_ParCSRMatrixRowStarts(A_uu));
   hypre_ParVectorInitialize(e2_vector);
   hypre_ParVectorSetConstantValues(e2_vector, 0.0);
   e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu),
                                     hypre_ParCSRMatrixGlobalNumRows(A_uu),
                                     hypre_ParCSRMatrixRowStarts(A_uu));
   hypre_ParVectorInitialize(e3_vector);
   hypre_ParVectorSetConstantValues(e3_vector, 0.0);
   e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su),
                                     hypre_ParCSRMatrixGlobalNumRows(A_su),
                                     hypre_ParCSRMatrixRowStarts(A_su));
   hypre_ParVectorInitialize(e4_vector);
   hypre_ParVectorSetConstantValues(e4_vector, 0.0);
   e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu),
                                     hypre_ParCSRMatrixGlobalNumRows(A_pu),
                                     hypre_ParCSRMatrixRowStarts(A_pu));
   hypre_ParVectorInitialize(e5_vector);
   hypre_ParVectorSetConstantValues(e5_vector, 0.0);
   // compute e2 = A_up * e1
   hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector);
   // solve e3 = A_uu^-1 * e2
   hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector);
   // compute e4 = A_su * e3
   // (BUG FIX: the original performed this identical matvec twice)
   hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector);
   // print e4
   hypre_ParVectorPrintIJ(e4_vector, 1, "Dsp");
   // compute e5 = A_pu * e3
   hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector);
   hypre_ParVectorPrintIJ(e5_vector, 1, "Dpp");
   hypre_ParVectorDestroy(e1_vector);
   hypre_ParVectorDestroy(e2_vector);
   hypre_ParVectorDestroy(e3_vector);
   /* BUG FIX: e4_vector and e5_vector were leaked in the original code. */
   hypre_ParVectorDestroy(e4_vector);
   hypre_ParVectorDestroy(e5_vector);
   hypre_ParCSRMatrixDestroy(A_uu);
   hypre_ParCSRMatrixDestroy(A_up);
   hypre_ParCSRMatrixDestroy(A_pu);
   hypre_ParCSRMatrixDestroy(A_su);
   hypre_TFree(U_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/* Compute a sparse approximate inverse of A using hypre's Newton–
 * Schulz–Hotelling (NSH) driver with fixed, conservative parameters
 * (small per-row nnz caps, drop tolerance 1e-2).
 *
 * A_inv: receives the newly created approximate inverse; caller owns
 *        it and must destroy it with hypre_ParCSRMatrixDestroy.
 */
HYPRE_Int
hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A,
                            hypre_ParCSRMatrix **A_inv)
{
   HYPRE_Int print_level, mr_max_row_nnz, mr_max_iter, nsh_max_row_nnz, nsh_max_iter, mr_col_version;
   HYPRE_Real mr_tol, nsh_tol;
   HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrix *approx_A_inv = NULL;
   print_level = 0;
   nsh_max_iter = 2;
   nsh_max_row_nnz = 2; // default 1000
   mr_max_iter = 1;
   mr_tol = 1.0e-3;
   mr_max_row_nnz = 2; // default 800
   mr_col_version = 0;
   nsh_tol = 1.0e-3;
   droptol[0] = 1.0e-2; /* MR drop tolerance */
   droptol[1] = 1.0e-2; /* NSH drop tolerance */
   hypre_ILUParCSRInverseNSH(A, &approx_A_inv, droptol, mr_tol, nsh_tol, DIVIDE_TOL, mr_max_row_nnz,
                             nsh_max_row_nnz, mr_max_iter, nsh_max_iter, mr_col_version, print_level);
   *A_inv = approx_A_inv;
   /* Freeing is NULL-safe; the original `if (droptol)` guard was redundant. */
   hypre_TFree(droptol, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/* Build the MGR interpolation operator P from a precomputed approximate
 * inverse S of A_ff:
 *    P = [ -S * A_fc ]   on F rows
 *        [     I     ]   on C rows.
 *
 * CF_marker:       +1 marks C-points; anything else is an F-point.
 * num_cpts_global: global coarse partitioning (entry 1 on the last rank
 *                  holds the total number of C-points).
 * debug_flag:      unused; kept for interface compatibility.
 * P_ptr:           receives the new ParCSR matrix (caller owns it).
 */
HYPRE_Int
hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A,
                                          hypre_ParCSRMatrix *S,
                                          HYPRE_Int *CF_marker,
                                          HYPRE_BigInt *num_cpts_global,
                                          HYPRE_Int debug_flag,
                                          hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int *C_marker;
   HYPRE_Int *F_marker;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *minus_Wp;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRMatrix *P;
   /* BUG FIX: initialize to NULL; the original could read this pointer
    * uninitialized when P_offd_size == 0 but num_cols_P_offd != 0. */
   HYPRE_BigInt *col_map_offd_P = NULL;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int coarse_counter;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int i, jj;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   // create complementary C and F selection markers
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ? -1 : 1;
   }
   // Get A_FC
   hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);
   // compute -Wp = S * A_fc (sign is flipped when copying into P below)
   minus_Wp = hypre_ParMatmul(S, A_fc);
   hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
   HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);
   hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
   HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    * Loop over fine grid: count nonzeros row by row. row_counter tracks
    * the corresponding row of minus_Wp (which has F rows only).
    *-----------------------------------------------------------------------*/
   HYPRE_Int row_counter = 0;
   coarse_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] > 0)
      {
         /* C-point: one identity entry; record its coarse index. */
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      else
      {
         /* F-point: one entry per nonzero of the matching minus_Wp row. */
         jj_counter += minus_Wp_diag_i[row_counter + 1] - minus_Wp_diag_i[row_counter];
         if (num_procs > 1)
         {
            jj_counter_offd += minus_Wp_offd_i[row_counter + 1] - minus_Wp_offd_i[row_counter];
         }
         row_counter++;
      }
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    * NOTE(review): these use HYPRE_MEMORY_DEVICE but are filled by host
    * loops below; this is only safe when device memory aliases host
    * memory (CPU builds) — verify for GPU builds.
    *-----------------------------------------------------------------------*/
   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_i[n_fine] = jj_counter;
   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
   /*-----------------------------------------------------------------------
    * Second Pass: fill P row by row.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   row_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /* BUG FIX: use the same C-point test (> 0) as the counting pass;
       * the original used >= 0 here, which could overflow the arrays
       * sized above if any CF_marker entry were 0. */
      if (CF_marker[i] > 0)
      {
         /* C-point: interpolation is the identity. */
         P_diag_i[i] = jj_counter;
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      else
      {
         /* F-point: copy the minus_Wp row with flipped sign. */
         P_diag_i[i] = jj_counter;
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            P_diag_j[jj_counter] = minus_Wp_diag_j[jj];
            P_diag_data[jj_counter] = - minus_Wp_diag_data[jj];
            jj_counter++;
         }
         /* Off-Diagonal part of P */
         P_offd_i[i] = jj_counter_offd;
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj];
               P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj];
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
      P_offd_i[i + 1] = jj_counter_offd;
   }
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   /* P inherits the off-processor column map of minus_Wp. */
   num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
   HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp);
   if (P_offd_size)
   {
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_P_offd; i++)
      {
         col_map_offd_P[i] = col_map_offd_tmp[i];
      }
   }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_MatvecCommPkgCreate(P);
   *P_ptr = P;
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(minus_Wp);
   return 0;
}
/* Build the MGR interpolation operator
 *
 *        P = [ -Wp ]        with  -Wp = -(approx A_ff^{-1}) * A_fc,
 *            [  I  ]
 *
 * i.e. C-points interpolate by identity and F-points through an approximate
 * ideal interpolation based on an approximate inverse of A_ff.
 *
 * A               : fine-grid system matrix
 * CF_marker       : C/F splitting; entry == 1 marks a C-point (see markers below)
 * num_cpts_global : [first coarse index owned here, first owned by next rank]
 * debug_flag      : unused here (kept for interface compatibility)
 * P_ptr           : out, the assembled interpolation matrix (caller owns it)
 *
 * Returns 0.
 *
 * Fixes relative to the previous version:
 *  - the fill pass used "CF_marker[i] >= 0" while the sizing pass used
 *    "> 0"; a CF_marker entry of 0 would therefore have overflowed the
 *    arrays allocated from the first pass.  Both passes now use "> 0".
 *  - col_map_offd_P was uninitialized and allocated under a different
 *    condition (P_offd_size) than the one under which it was installed
 *    (num_cols_P_offd); it is now NULL-initialized and handled under a
 *    single condition.
 */
HYPRE_Int
hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A,
                                       HYPRE_Int *CF_marker,
                                       HYPRE_BigInt *num_cpts_global,
                                       HYPRE_Int debug_flag,
                                       hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int *C_marker;
   HYPRE_Int *F_marker;
   hypre_ParCSRMatrix *A_ff;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *A_ff_inv;
   hypre_ParCSRMatrix *minus_Wp;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P = NULL;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int coarse_counter;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int i, jj;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;

   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);

   /* Create C and F markers: +1 selects a row/column of the submatrix,
    * -1 excludes it. */
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ? -1 : 1;
   }

   /* Extract A_FF and A_FC, then form Wp = (approx A_ff^{-1}) * A_fc.
    * The entries are negated when copied into P below. */
   hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff);
   hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);
   hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
   minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc);

   hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
   HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);
   hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
   HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* The last rank holds the global number of coarse points; broadcast it. */
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.  row_counter tracks the row of minus_Wp that
    * corresponds to the current F-point (minus_Wp has one row per F-point).
    *-----------------------------------------------------------------------*/
   HYPRE_Int row_counter = 0;
   coarse_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] > 0)
      {
         /* C-point: interpolation is the identity (one diag entry). */
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      else
      {
         /* F-point: one P entry per nonzero in the minus_Wp row. */
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            jj_counter++;
         }
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);

   /*-----------------------------------------------------------------------
    * Second pass: fill P.  The C/F test MUST match the first pass
    * ("> 0"): the arrays above were sized by that predicate, and a
    * mismatch (the old ">= 0") would overflow them if any CF_marker
    * entry were 0.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   row_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] > 0)
      {
         /* C-point: identity row mapping to its coarse index. */
         P_diag_i[i] = jj_counter;
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      else
      {
         /* F-point: copy the (negated) minus_Wp row. */
         P_diag_i[i] = jj_counter;
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            P_diag_j[jj_counter] = minus_Wp_diag_j[jj];
            P_diag_data[jj_counter] = - minus_Wp_diag_data[jj];
            jj_counter++;
         }
         /* Off-diagonal part of P */
         P_offd_i[i] = jj_counter_offd;
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj];
               P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj];
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
      P_offd_i[i + 1] = jj_counter_offd;
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   /* P inherits the off-processor column map of minus_Wp unchanged
    * (F-rows of P are exactly the rows of minus_Wp; C-rows add no
    * off-diagonal entries). */
   num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
   HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp);
   if (num_cols_P_offd)
   {
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_P_offd; i++)
      {
         col_map_offd_P[i] = col_map_offd_tmp[i];
      }
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   /* Release work objects; P now owns the arrays installed above. */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(A_ff);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(A_ff_inv);
   hypre_ParCSRMatrixDestroy(minus_Wp);
   return 0;
}
/* Set up the MGR interpolation operator for one level.
 *
 * The builder is selected by interp_type:
 *   < 3   : hypre_MGRBuildP (interp_type forwarded as its method flag)
 *   == 4  : approximate inverse of A_ff, followed by truncation
 *   == 99 : extended approximate-inverse variant, followed by truncation
 *   else  : classical modified interpolation (BoomerAMG)
 *
 * The assembled operator is returned through P.  num_functions, dof_func
 * and numsweeps are currently not used by any selected builder.
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_MGRBuildInterp(hypre_ParCSRMatrix *A,
                     HYPRE_Int *CF_marker,
                     hypre_ParCSRMatrix *S,
                     HYPRE_BigInt *num_cpts_global,
                     HYPRE_Int num_functions,
                     HYPRE_Int *dof_func,
                     HYPRE_Int debug_flag,
                     HYPRE_Real trunc_factor,
                     HYPRE_Int max_elmts,
                     hypre_ParCSRMatrix **P,
                     HYPRE_Int interp_type,
                     HYPRE_Int numsweeps)
{
   hypre_ParCSRMatrix *interp_mat = NULL;
   /* Only the approximate-inverse builders get a truncation pass. */
   HYPRE_Int needs_truncation = (interp_type == 4) || (interp_type == 99);

   if (interp_type < 3)
   {
      hypre_MGRBuildP(A, CF_marker, num_cpts_global, interp_type, debug_flag, &interp_mat);
   }
   else if (interp_type == 4)
   {
      hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global,
                                             debug_flag, &interp_mat);
   }
   else if (interp_type == 99)
   {
      hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global,
                                                debug_flag, &interp_mat);
   }
   else
   {
      /* Classical modified interpolation */
      hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag,
                                 trunc_factor, max_elmts, &interp_mat);
   }

   if (needs_truncation)
   {
      hypre_BoomerAMGInterpTruncation(interp_mat, trunc_factor, max_elmts);
   }

   /* set pointer to P */
   *P = interp_mat;
   return hypre_error_flag;
}
/* Set up the MGR restriction operator for one level.
 *
 * The builder is selected by restrict_type:
 *   0      : hypre_MGRBuildP applied directly to A
 *   1, 2   : hypre_MGRBuildP applied to A^T
 *   3      : approximate inverse applied to A^T, followed by truncation
 *   else   : classical modified interpolation on A^T with a freshly built
 *            strength matrix
 *
 * The result is returned through R.  num_functions, dof_func and numsweeps
 * are currently unused by any selected builder.  Returns hypre_error_flag.
 *
 * Fix: the strength matrix ST (created in the default branch, i.e. for any
 * restrict_type other than 0-3) was only destroyed when restrict_type > 5,
 * leaking it for restrict_type 4 and 5.  ST is now freed whenever it was
 * created.
 */
HYPRE_Int
hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A,
                       HYPRE_Int *CF_marker,
                       HYPRE_BigInt *num_cpts_global,
                       HYPRE_Int num_functions,
                       HYPRE_Int *dof_func,
                       HYPRE_Int debug_flag,
                       HYPRE_Real trunc_factor,
                       HYPRE_Int max_elmts,
                       HYPRE_Real strong_threshold,
                       HYPRE_Real max_row_sum,
                       hypre_ParCSRMatrix **R,
                       HYPRE_Int restrict_type,
                       HYPRE_Int numsweeps)
{
   hypre_ParCSRMatrix *R_ptr = NULL;
   hypre_ParCSRMatrix *AT = NULL;
   hypre_ParCSRMatrix *ST = NULL;

   /* All non-injection variants operate on the transpose of A. */
   if (restrict_type > 0)
   {
      hypre_ParCSRMatrixTranspose(A, &AT, 1);
   }

   if (restrict_type == 0)
   {
      hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr);
   }
   else if (restrict_type == 1 || restrict_type == 2)
   {
      hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr);
   }
   else if (restrict_type == 3)
   {
      /* move diagonal to first entry */
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(AT));
      hypre_MGRBuildInterpApproximateInverse(AT, CF_marker, num_cpts_global, debug_flag, &R_ptr);
      hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts);
   }
   else
   {
      /* NOTE(review): this branch dereferences AT, which is built only for
       * restrict_type > 0; a negative restrict_type would pass AT == NULL
       * here -- confirm callers never do that. */
      /* Build new strength matrix */
      hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST);
      /* Classical modified interpolation */
      hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global, 1, NULL, debug_flag,
                                 trunc_factor, max_elmts, &R_ptr);
   }

   /* set pointer to R */
   *R = R_ptr;

   /* Free work matrices; ST is freed whenever it was created (the old
    * "restrict_type > 5" test leaked it for types 4 and 5). */
   if (AT)
   {
      hypre_ParCSRMatrixDestroy(AT);
   }
   if (ST)
   {
      hypre_ParCSRMatrixDestroy(ST);
   }
   return hypre_error_flag;
}
/* Invert a dense 4x4 matrix in place.
 *
 * a - row-major 4x4 array (16 entries); on exit it holds inv(a).
 *
 * Uses the explicit cofactor (adjugate) expansion: each M_ij below is a
 * signed 3x3 minor, arranged so that the result is adj(a)/det(a).  There
 * is no singularity check (the original guard is kept commented out), so
 * det == 0 produces Inf/NaN entries.
 */
void hypre_blas_smat_inv_n4 (HYPRE_Real *a)
{
   /* Unpack entries: a_rc = row r, column c of the input matrix. */
   const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3];
   const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7];
   const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11];
   const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15];
   /* Signed 3x3 minors (cofactors of the transpose). */
   const HYPRE_Real M11 = a22 * a33 * a44 + a23 * a34 * a42 + a24 * a32 * a43 - a22 * a34 * a43 - a23 *
                          a32 * a44 - a24 * a33 * a42;
   const HYPRE_Real M12 = a12 * a34 * a43 + a13 * a32 * a44 + a14 * a33 * a42 - a12 * a33 * a44 - a13 *
                          a34 * a42 - a14 * a32 * a43;
   const HYPRE_Real M13 = a12 * a23 * a44 + a13 * a24 * a42 + a14 * a22 * a43 - a12 * a24 * a43 - a13 *
                          a22 * a44 - a14 * a23 * a42;
   const HYPRE_Real M14 = a12 * a24 * a33 + a13 * a22 * a34 + a14 * a23 * a32 - a12 * a23 * a34 - a13 *
                          a24 * a32 - a14 * a22 * a33;
   const HYPRE_Real M21 = a21 * a34 * a43 + a23 * a31 * a44 + a24 * a33 * a41 - a21 * a33 * a44 - a23 *
                          a34 * a41 - a24 * a31 * a43;
   const HYPRE_Real M22 = a11 * a33 * a44 + a13 * a34 * a41 + a14 * a31 * a43 - a11 * a34 * a43 - a13 *
                          a31 * a44 - a14 * a33 * a41;
   const HYPRE_Real M23 = a11 * a24 * a43 + a13 * a21 * a44 + a14 * a23 * a41 - a11 * a23 * a44 - a13 *
                          a24 * a41 - a14 * a21 * a43;
   const HYPRE_Real M24 = a11 * a23 * a34 + a13 * a24 * a31 + a14 * a21 * a33 - a11 * a24 * a33 - a13 *
                          a21 * a34 - a14 * a23 * a31;
   const HYPRE_Real M31 = a21 * a32 * a44 + a22 * a34 * a41 + a24 * a31 * a42 - a21 * a34 * a42 - a22 *
                          a31 * a44 - a24 * a32 * a41;
   const HYPRE_Real M32 = a11 * a34 * a42 + a12 * a31 * a44 + a14 * a32 * a41 - a11 * a32 * a44 - a12 *
                          a34 * a41 - a14 * a31 * a42;
   const HYPRE_Real M33 = a11 * a22 * a44 + a12 * a24 * a41 + a14 * a21 * a42 - a11 * a24 * a42 - a12 *
                          a21 * a44 - a14 * a22 * a41;
   const HYPRE_Real M34 = a11 * a24 * a32 + a12 * a21 * a34 + a14 * a22 * a31 - a11 * a22 * a34 - a12 *
                          a24 * a31 - a14 * a21 * a32;
   const HYPRE_Real M41 = a21 * a33 * a42 + a22 * a31 * a43 + a23 * a32 * a41 - a21 * a32 * a43 - a22 *
                          a33 * a41 - a23 * a31 * a42;
   const HYPRE_Real M42 = a11 * a32 * a43 + a12 * a33 * a41 + a13 * a31 * a42 - a11 * a33 * a42 - a12 *
                          a31 * a43 - a13 * a32 * a41;
   const HYPRE_Real M43 = a11 * a23 * a42 + a12 * a21 * a43 + a13 * a22 * a41 - a11 * a22 * a43 - a12 *
                          a23 * a41 - a13 * a21 * a42;
   const HYPRE_Real M44 = a11 * a22 * a33 + a12 * a23 * a31 + a13 * a21 * a32 - a11 * a23 * a32 - a12 *
                          a21 * a33 - a13 * a22 * a31;
   /* Laplace expansion of the determinant along the first row. */
   const HYPRE_Real det = a11 * M11 + a12 * M21 + a13 * M31 + a14 * M41;
   HYPRE_Real det_inv;
   //if ( fabs(det) < 1e-22 ) {
   //hypre_printf("### WARNING: Matrix is nearly singular! det = %e\n", det);
   /*
   printf("##----------------------------------------------\n");
   printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2);
   printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5);
   printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7);
   printf("##----------------------------------------------\n");
   getchar();
   */
   //}
   det_inv = 1.0 / det;
   /* inv(a) = adj(a) * (1/det), written back over the input. */
   a[0] = M11 * det_inv; a[1] = M12 * det_inv; a[2] = M13 * det_inv; a[3] = M14 * det_inv;
   a[4] = M21 * det_inv; a[5] = M22 * det_inv; a[6] = M23 * det_inv; a[7] = M24 * det_inv;
   a[8] = M31 * det_inv; a[9] = M32 * det_inv; a[10] = M33 * det_inv; a[11] = M34 * det_inv;
   a[12] = M41 * det_inv; a[13] = M42 * det_inv; a[14] = M43 * det_inv; a[15] = M44 * det_inv;
}
/* Invert a dense n x n matrix in place.
 *
 * a - row-major n*n array; on exit it holds inv(a).
 * n - matrix dimension.
 *
 * n == 4 dispatches to the closed-form cofactor routine; every other n
 * uses an in-place Gauss-Jordan elimination.  NOTE: there is no pivoting
 * (the zero-pivot guard is kept commented out), so a zero or tiny pivot
 * at any step yields Inf/NaN results.  The statement order inside the
 * k-loop (scale pivot row, update all other rows, then scale the pivot
 * column by -alinv) is what makes the in-place update correct; do not
 * reorder it.
 */
void hypre_blas_mat_inv(HYPRE_Real *a,
                        HYPRE_Int n)
{
   HYPRE_Int i, j, k, l, u, kn, in;
   HYPRE_Real alinv;
   if (n == 4)
   {
      hypre_blas_smat_inv_n4(a);
   }
   else
   {
      for (k = 0; k < n; ++k)
      {
         kn = k * n;   /* offset of row k */
         l = kn + k;   /* flat index of the pivot a[k][k] */
         //if (fabs(a[l]) < SMALLREAL) {
         //   printf("### WARNING: Diagonal entry is close to zero!");
         //   printf("### WARNING: diag_%d=%e\n", k, a[l]);
         //   a[l] = SMALLREAL;
         //}
         alinv = 1.0 / a[l];
         a[l] = alinv;
         /* Scale row k (skipping the pivot itself) by 1/pivot. */
         for (j = 0; j < k; ++j)
         {
            u = kn + j; a[u] *= alinv;
         }
         for (j = k + 1; j < n; ++j)
         {
            u = kn + j; a[u] *= alinv;
         }
         /* Eliminate column k from every row i != k. */
         for (i = 0; i < k; ++i)
         {
            in = i * n;
            for (j = 0; j < n; ++j)
               if (j != k)
               {
                  u = in + j; a[u] -= a[in + k] * a[kn + j];
               } // end if (j!=k)
         }
         for (i = k + 1; i < n; ++i)
         {
            in = i * n;
            for (j = 0; j < n; ++j)
               if (j != k)
               {
                  u = in + j; a[u] -= a[in + k] * a[kn + j];
               } // end if (j!=k)
         }
         /* Scale column k (skipping the pivot) by -1/pivot. */
         for (i = 0; i < k; ++i)
         {
            u = i * n + k; a[u] *= -alinv;
         }
         for (i = k + 1; i < n; ++i)
         {
            u = i * n + k; a[u] *= -alinv;
         }
      } // end for (k=0; k<n; ++k)
   }// end if
}
/* Build the block-Jacobi scaling matrix B = blockdiag(D_1^{-1}, ..., D_m^{-1}),
 * where each D_k is the blk_size x blk_size diagonal sub-block of A's local
 * diagonal part.
 *
 * A          - input matrix (only its local diagonal CSR block is read)
 * B_ptr      - out: block-diagonal inverse, stored as a ParCSR matrix with an
 *              empty off-diagonal part
 * mgr_vdata  - MGR data (supplies block_size and reserved_coarse_size, and
 *              receives a separately built diaginv via hypre_blockRelax_setup)
 * debug_flag - unused
 *
 * Returns 0 (block_scaling_error is never set in this routine).
 *
 * NOTE(review): "my_id == num_procs" can never hold (ranks run 0..num_procs-1),
 * so the reserved_coarse_size branch is dead and every rank uses
 * n_block = n / blk_size.  It looks like "num_procs - 1" was intended — but
 * enabling that branch would also require filling the trailing left_size
 * rows of B, which this routine does not do (only n_block full blocks are
 * written).  Confirm intent before changing.
 */
HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr,
                                     void *mgr_vdata, HYPRE_Int debug_flag)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int blk_size = (mgr_data -> block_size);
   HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_ParCSRMatrix *B;
   hypre_CSRMatrix *B_diag;
   HYPRE_Real *B_diag_data;
   HYPRE_Int *B_diag_i;
   HYPRE_Int *B_diag_j;
   hypre_CSRMatrix *B_offd;
   HYPRE_Int i, ii;
   HYPRE_Int j, jj;
   HYPRE_Int k;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int n_block, left_size, inv_size;
   // HYPRE_Real wall_time; /* for debugging instrumentation */
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Real * diaginv;    /* scratch: one blk_size x blk_size dense block */
   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int block_scaling_error = 0;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();
   //printf("n = %d\n",n);
   /* NOTE(review): dead branch — my_id is never equal to num_procs. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }
   /* Storage for n_block dense blocks plus one left_size x left_size block. */
   inv_size = nb2 * n_block + left_size * left_size;
   //printf("inv_size = %d\n",inv_size);
   /* Also (re)build the flat diaginv array stored in mgr_data. */
   hypre_blockRelax_setup(A, blk_size, reserved_coarse_size, &(mgr_data -> diaginv));
   // if (debug_flag==4) wall_time = time_getWallclockSeconds();
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of B and fill in
    *-----------------------------------------------------------------------*/
   B_diag_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST);
   B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST);
   B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   B_diag_i[n] = inv_size;
   //B_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   //B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
   //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
   //B_offd_i[n] = 1;
   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST);
   //printf("n_block = %d\n",n_block);
   for (i = 0; i < n_block; i++)
   {
      /* Rows [bidxm1, bidxp1) of A form diagonal block i. */
      bidxm1 = i * blk_size;
      bidxp1 = (i + 1) * blk_size;
      for (k = 0; k < blk_size; k++)
      {
         /* Clear the dense scratch block, then gather A's entries that
          * fall inside the block's column range. */
         for (j = 0; j < blk_size; j++)
         {
            bidx = k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = k * blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
      /* for (k = 0;k < blk_size; k++) */
      /* { */
      /*    for (j = 0;j < blk_size; j++) */
      /*    { */
      /*       bidx = k*blk_size + j; */
      /*       printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */
      /*    } */
      /* } */
      /* Invert the dense block in place, then scatter it into B's CSR
       * arrays as a fully dense blk_size x blk_size block. */
      hypre_blas_mat_inv(diaginv, blk_size);
      for (k = 0; k < blk_size; k++)
      {
         B_diag_i[i * blk_size + k] = i * nb2 + k * blk_size;
         //B_offd_i[i*nb2+k] = 0;
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            B_diag_j[bidx] = i * blk_size + j;
            B_diag_data[bidx] = diaginv[k * blk_size + j];
         }
      }
   }
   /* NOTE(review): rows n_block*blk_size .. n-1 (the left_size remainder,
    * if any) are never filled here — their B_diag_i entries stay 0 from
    * CTAlloc.  With the dead branch above, left_size is n % blk_size. */
   //printf("Before create\n");
   B = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixGlobalNumCols(A),
                                hypre_ParCSRMatrixRowStarts(A),
                                hypre_ParCSRMatrixColStarts(A),
                                0,
                                inv_size,
                                0);
   //printf("After create\n");
   B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrixData(B_diag) = B_diag_data;
   hypre_CSRMatrixI(B_diag) = B_diag_i;
   hypre_CSRMatrixJ(B_diag) = B_diag_j;
   B_offd = hypre_ParCSRMatrixOffd(B);
   hypre_CSRMatrixData(B_offd) = NULL;
   hypre_CSRMatrixI(B_offd) = NULL;
   hypre_CSRMatrixJ(B_offd) = NULL;
   *B_ptr = B;
   return (block_scaling_error);
}
/* One sweep of block relaxation: u += blockdiag(D^{-1}) * (f - A u~),
 * where u~ is the iterate at sweep start (Jacobi) or the partially
 * updated u (Gauss-Seidel on the diagonal part).
 *
 * A         - system matrix
 * f         - right-hand side
 * u         - in/out iterate
 * blk_size  - block size (NOTE(review): declared HYPRE_Real but used as an
 *             integer count throughout; relies on implicit conversion)
 * n_block   - number of full blocks to relax
 * left_size - leftover rows; NOTE(review): not relaxed by this routine
 * method    - 0/default: Jacobi on the diagonal part; 1: Gauss-Seidel on
 *             the diagonal part; off-diagonal part is always Jacobi
 * diaginv   - precomputed block inverses, n_block dense blk_size x blk_size
 *             blocks laid out contiguously (from hypre_blockRelax_setup)
 * Vtemp     - work vector holding the sweep-start iterate
 *
 * Returns 0 (relax_error is never set here).
 */
HYPRE_Int hypre_blockRelax_solve (hypre_ParCSRMatrix *A,
                                  hypre_ParVector *f,
                                  hypre_ParVector *u,
                                  HYPRE_Real blk_size,
                                  HYPRE_Int n_block,
                                  HYPRE_Int left_size,
                                  HYPRE_Int method,
                                  HYPRE_Real *diaginv,
                                  hypre_ParVector *Vtemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data = NULL;     /* received off-processor u values */
   HYPRE_Real *v_buf_data;           /* send buffer for u values */
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidx1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, my_id;
   HYPRE_Real *res;                  /* per-block residual, blk_size entries */
   const HYPRE_Int nb2 = blk_size * blk_size;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();
   res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   /* Post the exchange of off-processor u entries first so it overlaps
    * with the local copy into Vtemp below. */
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            v_buf_data[index++]
               = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
                                                  Vext_data);
   }
   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
      //printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
   }
   /* Wait for the off-processor u values before using Vext_data. */
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }
   /*-----------------------------------------------------------------
    * Relax points block by block
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      /* Residual for the blk_size rows of block i: res = f - A u~. */
      for (j = 0; j < blk_size; j++)
      {
         bidx = i * blk_size + j;
         res[j] = f_data[bidx];
         for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx + 1]; jj++)
         {
            ii = A_diag_j[jj];
            if (method == 0)
            {
               // Jacobi for diagonal part
               res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            }
            else if (method == 1)
            {
               // Gauss-Seidel for diagonal part
               res[j] -= A_diag_data[jj] * u_data[ii];
            }
            else
            {
               // Default do Jacobi for diagonal part
               res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            }
            //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
         }
         for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx + 1]; jj++)
         {
            // always do Jacobi for off-diagonal part
            ii = A_offd_j[jj];
            res[j] -= A_offd_data[jj] * Vext_data[ii];
         }
         //printf("%d: res = %e\n",bidx,res[j]);
      }
      /* Update: u(block i) += D_i^{-1} * res (dense block times vector). */
      for (j = 0; j < blk_size; j++)
      {
         bidx1 = i * blk_size + j;
         for (k = 0; k < blk_size; k++)
         {
            bidx = i * nb2 + j * blk_size + k;
            u_data[bidx1] += res[k] * diaginv[bidx];
            //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
         }
         //printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
      }
   }
   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(res, HYPRE_MEMORY_HOST);
   return (relax_error);
}
/* One block Gauss-Seidel sweep: for each block i in order,
 * u(block i) += D_i^{-1} * (f - A u)(block i), where the diagonal-part
 * residual uses the current (partially updated) u and the off-diagonal
 * part uses the communicated sweep-start values.
 *
 * A         - system matrix
 * f         - right-hand side
 * u         - in/out iterate, updated block by block in place
 * blk_size  - block size (NOTE(review): declared HYPRE_Real but used as an
 *             integer count throughout; relies on implicit conversion)
 * n_block   - number of full blocks to relax
 * left_size - leftover rows; NOTE(review): not relaxed by this routine
 * diaginv   - precomputed block inverses, n_block dense blk_size x blk_size
 *             blocks laid out contiguously
 * Vtemp     - work vector; receives a copy of u at sweep start (the copy is
 *             kept for interface parity with the Jacobi variant, the GS
 *             residual itself reads u_data)
 *
 * Returns 0 (relax_error is never set here).
 */
HYPRE_Int hypre_block_gs (hypre_ParCSRMatrix *A,
                          hypre_ParVector *f,
                          hypre_ParVector *u,
                          HYPRE_Real blk_size,
                          HYPRE_Int n_block,
                          HYPRE_Int left_size,
                          HYPRE_Real *diaginv,
                          hypre_ParVector *Vtemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data = NULL;     /* received off-processor u values */
   HYPRE_Real *v_buf_data;           /* send buffer for u values */
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidx1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, my_id;
   HYPRE_Real *res;                  /* per-block residual, blk_size entries */
   const HYPRE_Int nb2 = blk_size * blk_size;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();
   res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   /* Post the exchange of off-processor u entries first so it overlaps
    * with the local copy into Vtemp below. */
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            v_buf_data[index++]
               = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
                                                  Vext_data);
   }
   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
      //printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
   }
   /* Wait for the off-processor u values before using Vext_data. */
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }
   /*-----------------------------------------------------------------
    * Relax points block by block
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      /* Residual for the blk_size rows of block i, Gauss-Seidel style:
       * the diagonal part reads the in-place updated u_data. */
      for (j = 0; j < blk_size; j++)
      {
         bidx = i * blk_size + j;
         res[j] = f_data[bidx];
         for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx + 1]; jj++)
         {
            ii = A_diag_j[jj];
            //res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            //printf("my_id = %d, %d: Au = %e * %e\n",my_id,ii,A_diag_data[jj],Vtemp_data[ii]);
            res[j] -= A_diag_data[jj] * u_data[ii];
            //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
         }
         for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx + 1]; jj++)
         {
            ii = A_offd_j[jj];
            res[j] -= A_offd_data[jj] * Vext_data[ii];
         }
         //printf("%d: res = %e\n",bidx,res[j]);
      }
      /* Update: u(block i) += D_i^{-1} * res (dense block times vector). */
      for (j = 0; j < blk_size; j++)
      {
         bidx1 = i * blk_size + j;
         for (k = 0; k < blk_size; k++)
         {
            bidx = i * nb2 + j * blk_size + k;
            u_data[bidx1] += res[k] * diaginv[bidx];
            //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
         }
         //printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
      }
   }
   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(res, HYPRE_MEMORY_HOST);
   return (relax_error);
}
/* Block smoother setup: extract the block diagonal of A and invert it.
 *
 * A                    - system matrix (only its local diag part is read)
 * blk_size             - dimension of the small dense diagonal blocks
 * reserved_coarse_size - rows at the end of the LAST rank excluded from blocking
 * diaginvptr           - in/out: (re)allocated array of inverted blocks.
 *                        Layout: n_block dense blk_size x blk_size blocks,
 *                        followed by one left_size x left_size block for the
 *                        leftover rows.  Caller owns the array.
 * Returns 1.
 */
HYPRE_Int
hypre_blockRelax_setup(hypre_ParCSRMatrix *A,
                       HYPRE_Int blk_size,
                       HYPRE_Int reserved_coarse_size,
                       HYPRE_Real **diaginvptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Int num_procs, my_id;
   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int n_block;
   HYPRE_Int left_size, inv_size;
   HYPRE_Real *diaginv = *diaginvptr;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* The reserved coarse rows live on the last rank only.
    * BUG FIX: ranks run 0..num_procs-1, so the original test
    * (my_id == num_procs) could never be true and the reserved rows
    * were never excluded. */
   if (my_id == (num_procs - 1))
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   /* n_block regular blocks plus one dense left_size x left_size block */
   inv_size = nb2 * n_block + left_size * left_size;

   /* (Re)allocate the output array; hypre_TFree() is a no-op on NULL */
   hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
   diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;         /* first row of block i  */
      bidxp1 = (i + 1) * blk_size;   /* one past its last row */
      for (k = 0; k < blk_size; k++)
      {
         /* zero the k-th row of block i, then scatter matching CSR entries */
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i * nb2 + k * blk_size + jj - bidxm1;
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* Leftover rows form one dense left_size x left_size block stored right
    * after the regular blocks.
    * BUG FIX: the row stride inside this block is left_size, not blk_size;
    * the old stride overran the allocation whenever left_size > blk_size.
    * BUG FIX: the column test must be '>=' so that column 0 of the leftover
    * block (including its diagonal entry) is not dropped. */
   for (i = 0; i < left_size; i++)
   {
      for (j = 0; j < left_size; j++)
      {
         diaginv[n_block * nb2 + i * left_size + j] = 0.0;
      }
      for (ii = A_diag_i[n_block * blk_size + i]; ii < A_diag_i[n_block * blk_size + i + 1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj >= n_block * blk_size)
         {
            diaginv[n_block * nb2 + i * left_size + (jj - n_block * blk_size)] = A_diag_data[ii];
         }
      }
   }

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i * nb2, blk_size);
      }
      /* BUG FIX: the leftover block starts at offset n_block*nb2; the old
       * blk_size*nb2 offset pointed into the middle of the regular blocks. */
      hypre_blas_mat_inv(diaginv + n_block * nb2, left_size);
   }
   else
   {
      /* Scalar case: plain reciprocals.
       * FIX-ME: zero-diagonal should be tested previously */
      for (i = 0; i < n; i++)
      {
         if (fabs(diaginv[i]) < SMALLREAL)
         {
            diaginv[i] = 0.0;
         }
         else
         {
            diaginv[i] = 1.0 / diaginv[i];
         }
      }
   }

   *diaginvptr = diaginv;
   return 1;
}
/* Block smoother driver: extract and invert the block diagonal of A, then run
 * one block-relaxation sweep via hypre_blockRelax_solve().
 *
 * A                    - system matrix
 * f, u                 - rhs and solution vectors (u updated by the sweep)
 * blk_size             - dimension of the small dense diagonal blocks
 * reserved_coarse_size - rows at the end of the LAST rank excluded from blocking
 * method               - relaxation variant forwarded to hypre_blockRelax_solve()
 * Vtemp                - work vector; Ztemp is currently unused
 * Returns relax_error (always 0 here).
 *
 * NOTE(review): the block-diagonal extraction below duplicates
 * hypre_blockRelax_setup(); keep the two in sync.
 */
HYPRE_Int
hypre_blockRelax(hypre_ParCSRMatrix *A,
                 hypre_ParVector *f,
                 hypre_ParVector *u,
                 HYPRE_Int blk_size,
                 HYPRE_Int reserved_coarse_size,
                 HYPRE_Int method,
                 hypre_ParVector *Vtemp,
                 hypre_ParVector *Ztemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_procs, my_id;
   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int n_block;
   HYPRE_Int left_size, inv_size;
   HYPRE_Real *diaginv;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* The reserved coarse rows live on the last rank only.
    * BUG FIX: ranks run 0..num_procs-1, so the original test
    * (my_id == num_procs) could never be true. */
   if (my_id == (num_procs - 1))
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   /* n_block regular blocks plus one dense left_size x left_size block */
   inv_size = nb2 * n_block + left_size * left_size;
   diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;         /* first row of block i  */
      bidxp1 = (i + 1) * blk_size;   /* one past its last row */
      for (k = 0; k < blk_size; k++)
      {
         /* zero the k-th row of block i, then scatter matching CSR entries */
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i * nb2 + k * blk_size + jj - bidxm1;
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* Leftover rows form one dense left_size x left_size block stored right
    * after the regular blocks.
    * BUG FIX: the row stride inside this block is left_size, not blk_size;
    * the old stride overran the allocation whenever left_size > blk_size.
    * BUG FIX: the column test must be '>=' so that column 0 of the leftover
    * block (including its diagonal entry) is not dropped. */
   for (i = 0; i < left_size; i++)
   {
      for (j = 0; j < left_size; j++)
      {
         diaginv[n_block * nb2 + i * left_size + j] = 0.0;
      }
      for (ii = A_diag_i[n_block * blk_size + i]; ii < A_diag_i[n_block * blk_size + i + 1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj >= n_block * blk_size)
         {
            diaginv[n_block * nb2 + i * left_size + (jj - n_block * blk_size)] = A_diag_data[ii];
         }
      }
   }

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i * nb2, blk_size);
      }
      /* BUG FIX: the leftover block starts at offset n_block*nb2; the old
       * blk_size*nb2 offset pointed into the middle of the regular blocks. */
      hypre_blas_mat_inv(diaginv + n_block * nb2, left_size);
   }
   else
   {
      /* Scalar case: plain reciprocals.
       * FIX-ME: zero-diagonal should be tested previously */
      for (i = 0; i < n; i++)
      {
         if (fabs(diaginv[i]) < SMALLREAL)
         {
            diaginv[i] = 0.0;
         }
         else
         {
            diaginv[i] = 1.0 / diaginv[i];
         }
      }
   }

   hypre_blockRelax_solve(A, f, u, blk_size, n_block, left_size, method, diaginv, Vtemp);

   /*-----------------------------------------------------------------
    * Free temporary memory
    *-----------------------------------------------------------------*/
   hypre_TFree(diaginv, HYPRE_MEMORY_HOST);

   return (relax_error);
}
/* Set the fine grid (F-relaxation) solver for the first level.
 * Stores the user's solve/setup callbacks and solver object, and disables
 * the default F-solver. */
HYPRE_Int
hypre_MGRSetFSolver( void *mgr_vdata,
                     HYPRE_Int (*fine_grid_solver_solve)(void*, void*, void*, void*),
                     HYPRE_Int (*fine_grid_solver_setup)(void*, void*, void*, void*),
                     void *fsolver )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;

   if (mgr == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_Int num_levels = mgr->max_num_coarse_levels;
   HYPRE_Solver **fsolvers = mgr->aff_solver;

   /* Lazily allocate one solver slot per coarse level */
   if (fsolvers == NULL)
   {
      fsolvers = hypre_CTAlloc(HYPRE_Solver*, num_levels, HYPRE_MEMORY_HOST);
   }

   /* only allow to set F-solver for the first level */
   fsolvers[0] = (HYPRE_Solver *) fsolver;

   mgr->fine_grid_solver_solve = fine_grid_solver_solve;
   mgr->fine_grid_solver_setup = fine_grid_solver_setup;
   mgr->aff_solver = fsolvers;
   mgr->use_default_fsolver = 0;

   return hypre_error_flag;
}
/* Set the coarse grid solver: store the user's solve/setup callbacks and
 * solver object, and disable the default coarse-grid solver. */
HYPRE_Int
hypre_MGRSetCoarseSolver( void *mgr_vdata,
                          HYPRE_Int (*coarse_grid_solver_solve)(void*, void*, void*, void*),
                          HYPRE_Int (*coarse_grid_solver_setup)(void*, void*, void*, void*),
                          void *coarse_grid_solver )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;

   if (mgr == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   mgr->coarse_grid_solver_solve = coarse_grid_solver_solve;
   mgr->coarse_grid_solver_setup = coarse_grid_solver_setup;
   mgr->coarse_grid_solver = (HYPRE_Solver) coarse_grid_solver;
   mgr->use_default_cgrid_solver = 0;

   return hypre_error_flag;
}
/* Set a user-supplied (approximate) inverse of the A_FF block.
 * CONSISTENCY FIX: guard against a NULL mgr_vdata like the sibling
 * setters/getters (hypre_MGRSetFSolver, hypre_MGRGetNumIterations, ...)
 * instead of dereferencing it unconditionally. */
HYPRE_Int
hypre_MGRSetAffInv( void *mgr_vdata,
                    hypre_ParCSRMatrix *A_ff_inv )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> A_ff_inv) = A_ff_inv;
   return hypre_error_flag;
}
/* Set the maximum number of coarse levels.
 * maxcoarselevs = 1 yields the default 2-grid scheme. */
HYPRE_Int
hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->max_num_coarse_levels = maxcoarselevs;
   return hypre_error_flag;
}
/* Set the system block size */
HYPRE_Int
hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->block_size = bsize;
   return hypre_error_flag;
}
/* Set the relaxation type for the fine levels of the reduction.
 * Supported flavors (see par_relax.c and par_relax_more.c):
 * relax_types 0 - 8, 13, 14, 18, 19, 98. */
HYPRE_Int
hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->relax_type = relax_type;
   return hypre_error_flag;
}
/* Set the number of relaxation sweeps */
HYPRE_Int
hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->num_relax_sweeps = nsweeps;
   return hypre_error_flag;
}
/* Set the F-relaxation strategy (0=single level, 1=multi level), applying
 * the same value to every coarse level. */
HYPRE_Int
hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   HYPRE_Int num_levels = mgr->max_num_coarse_levels;
   HYPRE_Int lvl;

   /* Drop any previously stored per-level array (hypre_TFree handles NULL) */
   hypre_TFree(mgr->Frelax_method, HYPRE_MEMORY_HOST);

   HYPRE_Int *methods = hypre_CTAlloc(HYPRE_Int, num_levels, HYPRE_MEMORY_HOST);
   for (lvl = 0; lvl < num_levels; lvl++)
   {
      methods[lvl] = relax_method;
   }

   mgr->Frelax_method = methods;
   return hypre_error_flag;
}
/* Set the F-relaxation strategy per level (0=single level, 1=multi level).
 * relax_method may be NULL, in which case every level defaults to 0;
 * otherwise it must hold max_num_coarse_levels entries. */
HYPRE_Int
hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   HYPRE_Int num_levels = mgr->max_num_coarse_levels;
   HYPRE_Int lvl;

   /* Drop any previously stored per-level array (hypre_TFree handles NULL) */
   hypre_TFree(mgr->Frelax_method, HYPRE_MEMORY_HOST);

   HYPRE_Int *methods = hypre_CTAlloc(HYPRE_Int, num_levels, HYPRE_MEMORY_HOST);
   for (lvl = 0; lvl < num_levels; lvl++)
   {
      methods[lvl] = (relax_method != NULL) ? relax_method[lvl] : 0;
   }

   mgr->Frelax_method = methods;
   return hypre_error_flag;
}
/* Coarse grid method per level: 0=Galerkin RAP, 1=non-Galerkin with dropping.
 * cg_method may be NULL, in which case every level defaults to 0 (Galerkin). */
HYPRE_Int
hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   HYPRE_Int num_levels = mgr->max_num_coarse_levels;
   HYPRE_Int lvl;

   /* Drop any previously stored per-level array (hypre_TFree handles NULL) */
   hypre_TFree(mgr->use_non_galerkin_cg, HYPRE_MEMORY_HOST);

   HYPRE_Int *cg_methods = hypre_CTAlloc(HYPRE_Int, num_levels, HYPRE_MEMORY_HOST);
   for (lvl = 0; lvl < num_levels; lvl++)
   {
      cg_methods[lvl] = (cg_method != NULL) ? cg_method[lvl] : 0;
   }

   mgr->use_non_galerkin_cg = cg_methods;
   return hypre_error_flag;
}
/* Set the F-relaxation number of functions for each level.
 * num_functions may be NULL, in which case every level defaults to 1. */
HYPRE_Int
hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   HYPRE_Int num_levels = mgr->max_num_coarse_levels;
   HYPRE_Int lvl;

   /* Drop any previously stored per-level array (hypre_TFree handles NULL) */
   hypre_TFree(mgr->Frelax_num_functions, HYPRE_MEMORY_HOST);

   HYPRE_Int *nfuncs = hypre_CTAlloc(HYPRE_Int, num_levels, HYPRE_MEMORY_HOST);
   for (lvl = 0; lvl < num_levels; lvl++)
   {
      nfuncs[lvl] = (num_functions != NULL) ? num_functions[lvl] : 1;
   }

   mgr->Frelax_num_functions = nfuncs;
   return hypre_error_flag;
}
/* Set the restriction operator type per level.
 * restrict_type may be NULL, in which case every level defaults to 0. */
HYPRE_Int
hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   HYPRE_Int num_levels = mgr->max_num_coarse_levels;
   HYPRE_Int lvl;

   /* Drop any previously stored per-level array (hypre_TFree handles NULL) */
   hypre_TFree(mgr->restrict_type, HYPRE_MEMORY_HOST);

   HYPRE_Int *rtypes = hypre_CTAlloc(HYPRE_Int, num_levels, HYPRE_MEMORY_HOST);
   for (lvl = 0; lvl < num_levels; lvl++)
   {
      rtypes[lvl] = (restrict_type != NULL) ? restrict_type[lvl] : 0;
   }

   mgr->restrict_type = rtypes;
   return hypre_error_flag;
}
/* Set the restriction operator type, applying the same value to every level. */
HYPRE_Int
hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   HYPRE_Int num_levels = mgr->max_num_coarse_levels;
   HYPRE_Int lvl;

   /* Drop any previously stored per-level array (hypre_TFree handles NULL) */
   hypre_TFree(mgr->restrict_type, HYPRE_MEMORY_HOST);

   HYPRE_Int *rtypes = hypre_CTAlloc(HYPRE_Int, num_levels, HYPRE_MEMORY_HOST);
   for (lvl = 0; lvl < num_levels; lvl++)
   {
      rtypes[lvl] = restrict_type;
   }

   mgr->restrict_type = rtypes;
   return hypre_error_flag;
}
/* Set the number of Jacobi sweeps used when computing the
 * restriction operator. */
HYPRE_Int
hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->num_restrict_sweeps = nsweeps;
   return hypre_error_flag;
}
/* Set the interpolation operator type, applying the same value to every level. */
HYPRE_Int
hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   HYPRE_Int num_levels = mgr->max_num_coarse_levels;
   HYPRE_Int lvl;

   /* Drop any previously stored per-level array (hypre_TFree handles NULL) */
   hypre_TFree(mgr->interp_type, HYPRE_MEMORY_HOST);

   HYPRE_Int *itypes = hypre_CTAlloc(HYPRE_Int, num_levels, HYPRE_MEMORY_HOST);
   for (lvl = 0; lvl < num_levels; lvl++)
   {
      itypes[lvl] = interpType;
   }

   mgr->interp_type = itypes;
   return hypre_error_flag;
}
/* Set the interpolation operator type per level.
 * interpType may be NULL, in which case every level defaults to 2. */
HYPRE_Int
hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   HYPRE_Int num_levels = mgr->max_num_coarse_levels;
   HYPRE_Int lvl;

   /* Drop any previously stored per-level array (hypre_TFree handles NULL) */
   hypre_TFree(mgr->interp_type, HYPRE_MEMORY_HOST);

   HYPRE_Int *itypes = hypre_CTAlloc(HYPRE_Int, num_levels, HYPRE_MEMORY_HOST);
   for (lvl = 0; lvl < num_levels; lvl++)
   {
      itypes[lvl] = (interpType != NULL) ? interpType[lvl] : 2;
   }

   mgr->interp_type = itypes;
   return hypre_error_flag;
}
/* Set the number of Jacobi sweeps used when computing the
 * interpolation operator. */
HYPRE_Int
hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->num_interp_sweeps = nsweeps;
   return hypre_error_flag;
}
/* Set the threshold used to truncate the coarse grid at each
 * level of the reduction. */
HYPRE_Int
hypre_MGRSetTruncateCoarseGridThreshold( void *mgr_vdata, HYPRE_Real threshold)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->truncate_coarse_grid_threshold = threshold;
   return hypre_error_flag;
}
/* Set print level for the F-relaxation solver */
HYPRE_Int
hypre_MGRSetFrelaxPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->frelax_print_level = print_level;
   return hypre_error_flag;
}
/* Set print level for the coarse grid solver */
HYPRE_Int
hypre_MGRSetCoarseGridPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->cg_print_level = print_level;
   return hypre_error_flag;
}
/* Set print level for the MGR solver */
HYPRE_Int
hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->print_level = print_level;
   return hypre_error_flag;
}
/* Set logging level for the MGR solver */
HYPRE_Int
hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->logging = logging;
   return hypre_error_flag;
}
/* Set maximum number of iterations for the MGR solver */
HYPRE_Int
hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->max_iter = max_iter;
   return hypre_error_flag;
}
/* Set convergence tolerance for the MGR solver */
HYPRE_Int
hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->tol = tol;
   return hypre_error_flag;
}
/* Set maximum number of iterations for the MGR global smoother */
HYPRE_Int
hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->global_smooth_iters = max_iter;
   return hypre_error_flag;
}
/* Set the global smoothing type for the MGR solver */
HYPRE_Int
hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->global_smooth_type = iter_type;
   return hypre_error_flag;
}
/* Set the maximum number of non-zero entries per row for the restriction
 * and interpolation operators when classical AMG interpolation is used. */
HYPRE_Int
hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   mgr->P_max_elmts = P_max_elmts;
   return hypre_error_flag;
}
/* Get the number of iterations taken by the MGR solver */
HYPRE_Int
hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;

   if (mgr == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *num_iterations = mgr->num_iterations;
   return hypre_error_flag;
}
/* Get the final relative residual norm of the MGR solver */
HYPRE_Int
hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;

   if (mgr == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *res_norm = mgr->final_rel_residual_norm;
   return hypre_error_flag;
}
/* Get the coarse grid convergence factor of the MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata, HYPRE_Real *conv_factor )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;

   if (mgr == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *conv_factor = mgr->cg_convergence_factor;
   return hypre_error_flag;
}
/* Extract the sub-matrix A(rows: row_cf_marker > 0, cols: col_cf_marker > 0)
 * from a hypre_ParCSRMatrix and return it in *A_block_ptr.
 * row_cf_marker / col_cf_marker are local marker arrays of length nrows(A);
 * an entry > 0 selects that row/column for the block.  The caller owns the
 * returned matrix.  Returns 0. */
HYPRE_Int
hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A,
                      HYPRE_Int *row_cf_marker,
                      HYPRE_Int *col_cf_marker,
                      HYPRE_Int debug_flag,
                      hypre_ParCSRMatrix **A_block_ptr )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);
   /* local (diag) and off-processor (offd) CSR parts of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   hypre_IntArray *coarse_dof_func_ptr = NULL;
   /* [first owned coarse pt, first coarse pt of next rank) for rows/cols */
   HYPRE_BigInt num_row_cpts_global[2];
   HYPRE_BigInt num_col_cpts_global[2];
   hypre_ParCSRMatrix *Ablock;     /* the sub-matrix being built */
   HYPRE_BigInt *col_map_offd_Ablock;
   HYPRE_Int *tmp_map_offd = NULL; /* selected offd cols of A (local ids) */
   HYPRE_Int *CF_marker_offd = NULL; /* col markers for off-proc columns */
   hypre_CSRMatrix *Ablock_diag;
   hypre_CSRMatrix *Ablock_offd;
   HYPRE_Real *Ablock_diag_data;
   HYPRE_Int *Ablock_diag_i;
   HYPRE_Int *Ablock_diag_j;
   HYPRE_Real *Ablock_offd_data;
   HYPRE_Int *Ablock_offd_i;
   HYPRE_Int *Ablock_offd_j;
   HYPRE_Int Ablock_diag_size, Ablock_offd_size;
   HYPRE_Int *Ablock_marker;
   HYPRE_Int ii_counter;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd; /* per-thread nonzero counts */
   HYPRE_Int start_indexing = 0; /* start indexing for Aff_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int *fine_to_coarse; /* local fine row -> local block column index */
   HYPRE_Int *coarse_counter;     /* per-thread selected-row counts */
   HYPRE_Int *col_coarse_counter; /* per-thread selected-column counts */
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_row_cpts;
   HYPRE_BigInt total_global_col_cpts;
   HYPRE_Int num_cols_Ablock_offd;
   // HYPRE_BigInt my_first_row_cpt, my_first_col_cpt;
   HYPRE_Int i, i1;
   HYPRE_Int j, jl, jj;
   HYPRE_Int start;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest; /* per-thread row-range partitioning */
   HYPRE_Int *int_buf_data;
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   hypre_IntArray *wrap_cf; /* IntArray view over the raw marker arrays */
   // HYPRE_Real wall_time; /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //num_threads = hypre_NumThreads();
   // Temporary fix, disable threading
   // TODO: enable threading
   num_threads = 1;

   /* get the number of coarse rows.  wrap_cf borrows the caller's marker
    * arrays (data pointer swapped below), so it is never destroyed here. */
   wrap_cf = hypre_IntArrayCreate(local_numrows);
   hypre_IntArrayMemoryLocation(wrap_cf) = HYPRE_MEMORY_HOST;
   hypre_IntArrayData(wrap_cf) = row_cf_marker;
   hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, wrap_cf, &coarse_dof_func_ptr,
                              num_row_cpts_global);
   hypre_IntArrayDestroy(coarse_dof_func_ptr);
   coarse_dof_func_ptr = NULL;
   //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]);
   // my_first_row_cpt = num_row_cpts_global[0];
   /* The last rank holds the global row count; broadcast it to everyone
    * (total_global_row_cpts is intentionally set only on that rank). */
   if (my_id == (num_procs - 1)) { total_global_row_cpts = num_row_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /* same for the number of coarse columns */
   hypre_IntArrayData(wrap_cf) = col_cf_marker;
   hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, wrap_cf, &coarse_dof_func_ptr,
                              num_col_cpts_global);
   hypre_IntArrayDestroy(coarse_dof_func_ptr);
   coarse_dof_func_ptr = NULL;
   //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]);
   // my_first_col_cpt = num_col_cpts_global[0];
   if (my_id == (num_procs - 1)) { total_global_col_cpts = num_col_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
   }
   // if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); }
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                           num_sends), HYPRE_MEMORY_HOST);
   /* pack local column markers for the rows other ranks reference */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         int_buf_data[index++]
            = col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of Ablock and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid: count selected rows/columns and nonzeros per thread.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* split rows [0,n_fine) into num_threads contiguous chunks [ns,ne) */
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a F-point, we loop through the columns and select
          * the F-columns. Also set up mapping vector.
          *--------------------------------------------------------------------*/
         if (col_cf_marker[i] > 0)
         {
            /* selected column: assign its index within the block */
            fine_to_coarse[i] = col_coarse_counter[j];
            col_coarse_counter[j]++;
         }
         if (row_cf_marker[i] > 0)
         {
            /* selected row: count its nonzeros that hit selected columns */
            //fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
            for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (col_cf_marker[i1] > 0)
               {
                  jj_count[j]++;
               }
            }
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (CF_marker_offd[i1] > 0)
                  {
                     jj_count_offd[j]++;
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays (prefix-sum the per-thread counters first).
    *-----------------------------------------------------------------------*/
   for (i = 0; i < num_threads - 1; i++)
   {
      jj_count[i + 1] += jj_count[i];
      jj_count_offd[i + 1] += jj_count_offd[i];
      coarse_counter[i + 1] += coarse_counter[i];
      col_coarse_counter[i + 1] += col_coarse_counter[i];
   }
   i = num_threads - 1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];
   ii_counter = coarse_counter[i];

   Ablock_diag_size = jj_counter;
   Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter + 1, memory_location);
   Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, memory_location);
   Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, memory_location);
   Ablock_diag_i[ii_counter] = jj_counter;

   Ablock_offd_size = jj_counter_offd;
   Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter + 1, memory_location);
   Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, memory_location);
   Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, memory_location);

   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   //-----------------------------------------------------------------------
   // Send and receive fine_to_coarse info.
   //-----------------------------------------------------------------------
   // if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* turn the per-thread column indices into globally consecutive local ids */
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) { coarse_shift = col_coarse_counter[j - 1]; }
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }

   // if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   // for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt;

   /*-----------------------------------------------------------------------
    * Second pass: fill the CSR arrays of Ablock.
    *-----------------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (jl < rest)
      {
         ns = jl * size + jl;
         ne = (jl + 1) * size + jl + 1;
      }
      else
      {
         ns = jl * size + rest;
         ne = (jl + 1) * size + rest;
      }
      /* resume the running nonzero counters where the previous thread ended */
      jj_counter = 0;
      if (jl > 0) { jj_counter = jj_count[jl - 1]; }
      jj_counter_offd = 0;
      if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }
      ii_counter = 0;
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a F-point, we loop through the columns and select
          * the F-columns. Also set up mapping vector.
          *--------------------------------------------------------------------*/
         if (row_cf_marker[i] > 0)
         {
            // Diagonal part of Ablock //
            Ablock_diag_i[ii_counter] = jj_counter;
            for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (col_cf_marker[i1] > 0)
               {
                  Ablock_diag_j[jj_counter] = fine_to_coarse[i1];
                  Ablock_diag_data[jj_counter] = A_diag_data[jj];
                  jj_counter++;
               }
            }
            // Off-Diagonal part of Ablock //
            Ablock_offd_i[ii_counter] = jj_counter_offd;
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (CF_marker_offd[i1] > 0)
                  {
                     /* keep A's offd column id for now; compressed below */
                     Ablock_offd_j[jj_counter_offd] = i1;
                     Ablock_offd_data[jj_counter_offd] = A_offd_data[jj];
                     jj_counter_offd++;
                  }
               }
            }
            ii_counter++;
         }
      }
      /* close the row-pointer arrays (valid since num_threads == 1;
       * NOTE(review): with real threading only the last thread should do
       * this — confirm before re-enabling OpenMP) */
      Ablock_offd_i[ii_counter] = jj_counter_offd;
      Ablock_diag_i[ii_counter] = jj_counter;
   }
   Ablock = hypre_ParCSRMatrixCreate(comm,
                                     total_global_row_cpts,
                                     total_global_col_cpts,
                                     num_row_cpts_global,
                                     num_col_cpts_global,
                                     0,
                                     Ablock_diag_i[ii_counter],
                                     Ablock_offd_i[ii_counter]);

   Ablock_diag = hypre_ParCSRMatrixDiag(Ablock);
   hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data;
   hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i;
   hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j;
   Ablock_offd = hypre_ParCSRMatrixOffd(Ablock);
   hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data;
   hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i;
   hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j;

   /* Compress the offd column space: keep only columns actually referenced
    * and renumber Ablock_offd_j to the compressed local ids. */
   num_cols_Ablock_offd = 0;
   if (Ablock_offd_size)
   {
      /* mark referenced offd columns of A */
      Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i = 0; i < num_cols_A_offd; i++)
      {
         Ablock_marker[i] = 0;
      }
      num_cols_Ablock_offd = 0;
      for (i = 0; i < Ablock_offd_size; i++)
      {
         index = Ablock_offd_j[i];
         if (!Ablock_marker[index])
         {
            num_cols_Ablock_offd++;
            Ablock_marker[index] = 1;
         }
      }

      /* NOTE(review): col_map_offd_Ablock values are not filled here;
       * presumably hypre_GetCommPkgRTFromCommPkgA() below populates the
       * global column ids — confirm against its implementation. */
      col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, memory_location);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, HYPRE_MEMORY_HOST);

      /* tmp_map_offd[k] = k-th referenced offd column of A (ascending) */
      index = 0;
      for (i = 0; i < num_cols_Ablock_offd; i++)
      {
         while (Ablock_marker[index] == 0) { index++; }
         tmp_map_offd[i] = index++;
      }
#if 0
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      /* renumber offd column indices to the compressed space */
      for (i = 0; i < Ablock_offd_size; i++)
         Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                               Ablock_offd_j[i],
                                               num_cols_Ablock_offd);
      hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST);
   }

   if (num_cols_Ablock_offd)
   {
      hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock;
      hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd);

   /* Create the assumed partition */
   if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL)
   {
      hypre_ParCSRMatrixCreateAssumedPartition(Ablock);
   }

   *A_block_ptr = Ablock;

   /* NOTE(review): wrap_cf is not destroyed here — its data pointer borrows
    * the caller's arrays, but the IntArray shell itself appears to leak.
    * Confirm whether this is intentional. */
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return (0);
}
/* Build A_FF matrix from A given a CF_marker array */
HYPRE_Int
hypre_MGRBuildAff( hypre_ParCSRMatrix *A,
                   HYPRE_Int *CF_marker,
                   HYPRE_Int debug_flag,
                   hypre_ParCSRMatrix **A_ff_ptr )
{
   HYPRE_Int row;
   HYPRE_Int nrows_local = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   /* Work on a sign-flipped copy of CF_marker: C-points become F-points and
    * vice versa, so the generic sub-block extraction returns the F-F block. */
   HYPRE_Int *flipped_marker = hypre_CTAlloc(HYPRE_Int, nrows_local, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(row) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (row = 0; row < nrows_local; row++)
   {
      flipped_marker[row] = -CF_marker[row];
   }

   hypre_MGRGetSubBlock(A, flipped_marker, flipped_marker, debug_flag, A_ff_ptr);

   /* Release the temporary negated marker */
   hypre_TFree(flipped_marker, HYPRE_MEMORY_HOST);
   return (0);
}
/*********************************************************************************
 * Scatter-add from the (shorter) 'fromVector' into the (longer) 'toVector':
 *    toVector[i] = b * toVector[i] + a * fromVector[j]
 * at every local index i where CF_marker[i] == point_type; j counts matches.
 * CF_marker must have the same local length as 'toVector', and must contain
 * exactly as many 'point_type' entries as 'fromVector' has local rows.
 *********************************************************************************/
HYPRE_Int
hypre_MGRAddVectorP ( HYPRE_Int *CF_marker,
                      HYPRE_Int point_type,
                      HYPRE_Real a,
                      hypre_ParVector *fromVector,
                      HYPRE_Real b,
                      hypre_ParVector **toVector )
{
   HYPRE_Real *src_data = hypre_VectorData(hypre_ParVectorLocalVector(fromVector));
   HYPRE_Real *dst_data = hypre_VectorData(hypre_ParVectorLocalVector(*toVector));
   HYPRE_Int   dst_size = hypre_ParVectorActualLocalSize(*toVector);
   HYPRE_Int   i;
   HYPRE_Int   j = 0;

   for (i = 0; i < dst_size; i++)
   {
      if (CF_marker[i] != point_type)
      {
         continue;
      }
      dst_data[i] = b * dst_data[i] + a * src_data[j];
      j++;
   }
   return 0;
}
/*************************************************************************************
 * Gather-add from the (longer) 'fromVector' into the (shorter) 'toVector':
 *    toVector[j] = b * toVector[j] + a * fromVector[i]
 * at every local index i where CF_marker[i] == point_type; j counts matches.
 * CF_marker must have the same local length as 'fromVector', and must contain
 * exactly as many 'point_type' entries as 'toVector' has local rows.
 *************************************************************************************/
HYPRE_Int
hypre_MGRAddVectorR ( HYPRE_Int *CF_marker,
                      HYPRE_Int point_type,
                      HYPRE_Real a,
                      hypre_ParVector *fromVector,
                      HYPRE_Real b,
                      hypre_ParVector **toVector )
{
   HYPRE_Real *src_data = hypre_VectorData(hypre_ParVectorLocalVector(fromVector));
   HYPRE_Real *dst_data = hypre_VectorData(hypre_ParVectorLocalVector(*toVector));
   HYPRE_Int   src_size = hypre_ParVectorActualLocalSize(fromVector);
   HYPRE_Int   i;
   HYPRE_Int   j = 0;

   for (i = 0; i < src_size; i++)
   {
      if (CF_marker[i] != point_type)
      {
         continue;
      }
      dst_data[j] = b * dst_data[j] + a * src_data[i];
      j++;
   }
   return 0;
}
/*
HYPRE_Int
hypre_MGRBuildAffRAP( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions,
HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr,
hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr )
{
HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST);
HYPRE_Int i;
for (i = 0; i < local_num_variables; i++) {
CF_marker_copy[i] = -CF_marker[i];
}
hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr);
hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr);
hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr);
hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST);
return 0;
}
*/
/* Get pointer to coarse grid matrix for MGR solver.
 * Returns the matrix through *RAP; errors if the solver handle or the output
 * pointer is NULL, or if MGRSetup() has not built the coarse grid yet. */
HYPRE_Int
hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   /* Fix: guard the output pointer too, instead of dereferencing it blindly */
   if (!RAP)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }
   if (mgr_data -> RAP == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        " Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n");
      return hypre_error_flag;
   }
   *RAP = mgr_data->RAP;
   return hypre_error_flag;
}
/* Get pointer to coarse grid solution for MGR solver.
 * Returns U_array[num_coarse_levels] through *sol; errors if the solver
 * handle or the output pointer is NULL, or if the solution array has not
 * been built by MGRSetup()/MGRSolve(). */
HYPRE_Int
hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   /* Fix: guard the output pointer too, instead of dereferencing it blindly */
   if (!sol)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }
   if (mgr_data -> U_array == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        " MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
      return hypre_error_flag;
   }
   *sol = mgr_data->U_array[mgr_data->num_coarse_levels];
   return hypre_error_flag;
}
/* Get pointer to coarse grid right-hand side for MGR solver.
 * (Header comment previously said "solution" — this accessor returns the RHS.)
 * Returns F_array[num_coarse_levels] through *rhs; errors if the solver
 * handle or the output pointer is NULL, or if the RHS array has not been
 * built by MGRSetup()/MGRSolve(). */
HYPRE_Int
hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   /* Fix: guard the output pointer too, instead of dereferencing it blindly */
   if (!rhs)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }
   if (mgr_data -> F_array == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        " MGR RHS array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
      return hypre_error_flag;
   }
   *rhs = mgr_data->F_array[mgr_data->num_coarse_levels];
   return hypre_error_flag;
}
/* Print coarse grid linear system (for debugging).
 * Stores the flag; the actual printing happens elsewhere when the flag is set. */
HYPRE_Int
hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Fix: guard against a NULL solver handle, consistent with the other
    * MGR accessors (hypre_MGRGetCoarseGridMatrix etc.) */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   mgr_data->print_coarse_system = print_flag;
   return hypre_error_flag;
}
/* Print solver params: dumps every MGR setup- and solve-phase knob to stdout
 * via hypre_printf. Assumes mgr_vdata is a valid hypre_ParMGRData built by
 * MGRSetup (no NULL check is performed here). */
HYPRE_Int
hypre_MGRWriteSolverParams(void *mgr_vdata)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i, j;
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   /* --- Setup-phase parameters --- */
   hypre_printf("MGR Setup parameters: \n");
   hypre_printf("Block size: %d\n", (mgr_data -> block_size));
   hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels));
   hypre_printf("Relax type: %d\n", (mgr_data -> relax_type));
   hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F));
   hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method));
   /* Per-level settings */
   for (i = 0; i < max_num_coarse_levels; i++)
   {
      hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]);
      hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]);
      hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]);
      hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i,
                   (mgr_data -> use_non_galerkin_cg)[i]);
      HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i];
      hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points);
      hypre_printf("Cpoints indices: ");
      for (j = 0; j < lvl_num_coarse_points; j++)
      {
         /* block_cf_marker[i][j] == 1 flags block index j as a C-point on level i */
         if ((mgr_data -> block_cf_marker)[i][j] == 1)
         {
            hypre_printf("%d ", j);
         }
      }
      hypre_printf("\n");
   }
   hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size));
   hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints));
   /* --- Solve-phase parameters --- */
   hypre_printf("\n MGR Solver Parameters: \n");
   hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps));
   hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps));
   hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps));
   hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type));
   hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters));
   hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter));
   hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol));
   hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver));
   /* A negative use_default_fsolver is apparently "unset" — only printed when >= 0 */
   if ((mgr_data -> use_default_fsolver) >= 0)
   {
      hypre_printf("Use default AMG solver for full AMG F-relaxation: %d\n",
                   (mgr_data -> use_default_fsolver));
   }
   return hypre_error_flag;
}
#ifdef HYPRE_USING_DSUPERLU
void *
hypre_MGRDirectSolverCreate()
{
   /* Allocate a zero-initialized SuperLU_DIST solver context; the fields are
    * populated later by hypre_MGRDirectSolverSetup(). */
   return (void *) hypre_CTAlloc(hypre_DSLUData, 1, HYPRE_MEMORY_HOST);
}
/* Set up a SuperLU_DIST direct solver for A: merge the local diag/offd parts
 * into one global-column CSR matrix, hand it to SuperLU_DIST, build the
 * process grid, and factor the matrix. f and u are unused here (the signature
 * matches the generic setup interface). */
HYPRE_Int
hypre_MGRDirectSolverSetup( void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *f,
                            hypre_ParVector *u )
{
   /* Par Data Structure variables */
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_local;
   HYPRE_Int num_rows;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int pcols = 1, prows = 1;
   HYPRE_BigInt *big_rowptr = NULL;
   hypre_DSLUData *dslu_data = (hypre_DSLUData *) solver;
   HYPRE_Int info = 0;
   /* nrhs = 0: the pdgssvx call below should only factor, not solve —
    * TODO confirm against the SuperLU_DIST documentation */
   HYPRE_Int nrhs = 0;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   /* Merge diag and offd into one matrix (global ids) */
   A_local = hypre_MergeDiagAndOffd(A);
   num_rows = hypre_CSRMatrixNumRows(A_local);
   /* Now convert hypre matrix to a SuperMatrix */
#ifdef HYPRE_MIXEDINT
   /* Mixed-int build: row pointers are HYPRE_Int but SuperLU needs
    * HYPRE_BigInt, so widen them into a freshly allocated array. */
   {
      HYPRE_Int *rowptr = NULL;
      HYPRE_Int i;
      rowptr = hypre_CSRMatrixI(A_local);
      big_rowptr = hypre_CTAlloc(HYPRE_BigInt, (num_rows + 1), HYPRE_MEMORY_HOST);
      for (i = 0; i < (num_rows + 1); i++)
      {
         big_rowptr[i] = (HYPRE_BigInt)rowptr[i];
      }
   }
#else
   /* Same width: reuse the CSR row pointer array directly */
   big_rowptr = hypre_CSRMatrixI(A_local);
#endif
   /* Wrap the merged CSR arrays in SuperLU_DIST's distributed row-local format */
   dCreate_CompRowLoc_Matrix_dist(
      &(dslu_data->A_dslu), global_num_rows, global_num_rows,
      hypre_CSRMatrixNumNonzeros(A_local),
      num_rows,
      hypre_ParCSRMatrixFirstRowIndex(A),
      hypre_CSRMatrixData(A_local),
      hypre_CSRMatrixBigJ(A_local), big_rowptr,
      SLU_NR_loc, SLU_D, SLU_GE);
   /* DOK: SuperLU frees assigned data, so set them to null before
    * calling hypre_CSRMatrixdestroy on A_local to avoid memory errors.
    */
#ifndef HYPRE_MIXEDINT
   hypre_CSRMatrixI(A_local) = NULL;
#endif
   hypre_CSRMatrixData(A_local) = NULL;
   hypre_CSRMatrixBigJ(A_local) = NULL;
   hypre_CSRMatrixDestroy(A_local);
   /*Create process grid: choose prows as the largest factor of num_procs
    * with prows <= sqrt-ish of num_procs, so prows * pcols == num_procs */
   while (prows * pcols <= num_procs) { ++prows; }
   --prows;
   pcols = num_procs / prows;
   while (prows * pcols != num_procs)
   {
      prows -= 1;
      pcols = num_procs / prows;
   }
   //hypre_printf(" prows %d pcols %d\n", prows, pcols);
   superlu_gridinit(comm, prows, pcols, &(dslu_data->dslu_data_grid));
   set_default_options_dist(&(dslu_data->dslu_options));
   dslu_data->dslu_options.Fact = DOFACT;
   dslu_data->dslu_options.PrintStat = NO;
   /*dslu_data->dslu_options.IterRefine = SLU_DOUBLE;
   dslu_data->dslu_options.ColPerm = MMD_AT_PLUS_A;
   dslu_data->dslu_options.DiagPivotThresh = 1.0;
   dslu_data->dslu_options.ReplaceTinyPivot = NO; */
   dScalePermstructInit(global_num_rows, global_num_rows, &(dslu_data->dslu_ScalePermstruct));
   dLUstructInit(global_num_rows, &(dslu_data->dslu_data_LU));
   PStatInit(&(dslu_data->dslu_data_stat));
   dslu_data->global_num_rows = global_num_rows;
   /* Backward-error array required by pdgssvx (length >= nrhs; 1 is enough) */
   dslu_data->berr = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
   dslu_data->berr[0] = 0.0;
   /* Numerical factorization */
   pdgssvx(&(dslu_data->dslu_options), &(dslu_data->A_dslu),
           &(dslu_data->dslu_ScalePermstruct), NULL, num_rows, nrhs,
           &(dslu_data->dslu_data_grid), &(dslu_data->dslu_data_LU),
           &(dslu_data->dslu_solve), dslu_data->berr, &(dslu_data->dslu_data_stat), &info);
   /* Mark the factorization as done so later solves reuse the factors */
   dslu_data->dslu_options.Fact = FACTORED;
   return hypre_error_flag;
}
/* Solve with the factorization computed in hypre_MGRDirectSolverSetup().
 * A is unused here: the factors live inside the solver context. Presumably
 * f is the right-hand side and u receives the solution — matches the
 * setup/solve call pattern; verify against hypre_SLUDistSolve. */
HYPRE_Int
hypre_MGRDirectSolverSolve( void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *f,
                            hypre_ParVector *u )
{
   hypre_SLUDistSolve(solver, f, u);
   return hypre_error_flag;
}
/* Release all SuperLU_DIST state owned by the solver context
 * (created by hypre_MGRDirectSolverCreate/Setup). */
HYPRE_Int
hypre_MGRDirectSolverDestroy( void *solver )
{
   hypre_SLUDistDestroy(solver);
   return hypre_error_flag;
}
#endif
|
ast-dump-openmp-teams-distribute-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // AST-dump fixture: single loop; CHECK lines pin exact line/col positions — do not reflow
#pragma omp target
#pragma omp teams distribute simd
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) { // fixture: nested loops, no collapse clause — layout pinned by CHECK lines
#pragma omp target
#pragma omp teams distribute simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) { // fixture: explicit collapse(1), only the outer loop is associated — layout pinned by CHECK lines
#pragma omp target
#pragma omp teams distribute simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) { // fixture: collapse(2) associates both loops — layout pinned by CHECK lines
#pragma omp target
#pragma omp teams distribute simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) { // fixture: collapse(2) with a third, non-associated inner loop — layout pinned by CHECK lines
#pragma omp target
#pragma omp teams distribute simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-teams-distribute-simd.c:3:1, line:8:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:8:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:6:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:1, col:34>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:34>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:34>
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:5:1, col:34>
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:10:1, line:16:1> line:10:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:16:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:11:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:12:1, col:34>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:34>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:34>
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:11:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:11:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:12:1, col:34>
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:11:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:18:1, line:24:1> line:18:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:24:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:19:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:20:1, col:46>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:46>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:46>
// CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:35, col:45>
// CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:44> 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 1
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:19:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:19:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:20:1, col:46>
// CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:35, col:45>
// CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:44> 'int'
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 1
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:19:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:26:1, line:32:1> line:26:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:32:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:27:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:28:1, col:46>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:46>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:46>
// CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:35, col:45>
// CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:44> 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:27:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:27:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:28:1, col:46>
// CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:35, col:45>
// CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:44> 'int'
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:27:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:34:1, line:41:1> line:34:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:41:1>
// CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:35:1, col:19>
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:36:1, col:46>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <col:1, col:46>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:46>
// CHECK-NEXT: | | | | |-OMPCollapseClause {{.*}} <col:35, col:45>
// CHECK-NEXT: | | | | | `-ConstantExpr {{.*}} <col:44> 'int'
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2
// CHECK-NEXT: | | | | `-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:35:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:35:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-OMPTeamsDistributeSimdDirective {{.*}} <line:36:1, col:46>
// CHECK-NEXT: | | |-OMPCollapseClause {{.*}} <col:35, col:45>
// CHECK-NEXT: | | | `-ConstantExpr {{.*}} <col:44> 'int'
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2
// CHECK-NEXT: | | `-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:35:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &'
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &'
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*'
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+'
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
order-2.c | void
/* Negative parse tests for the OpenMP `order` clause: every pragma below is
   deliberately malformed (missing parens, stray colon, bad modifier, or an
   unclosed clause), and the dg-error comment on the same line names the
   diagnostic the compiler is required to emit.  */
f1 (int *a)
{
  int i;
#pragma omp for order /* { dg-error "expected" } */
  for (i = 0; i < 128; i++)
    a[i]++;
#pragma omp for simd order : /* { dg-error "expected" } */
  for (i = 0; i < 128; i++)
    a[i]++;
#pragma omp simd order ( foobar ) /* { dg-error "expected" } */
  for (i = 0; i < 128; i++)
    a[i]++;
#pragma omp for simd order( concurrent /* { dg-error "expected" } */
  for (i = 0; i < 128; i++)
    a[i]++;
#pragma omp for simd order( concurrent : foo )/* { dg-error "expected" } */
  for (i = 0; i < 128; i++)
    a[i]++;
}
/* Negative semantic tests for `order(concurrent)`: the syntax is valid, but
   each directive either does not accept the clause or combines it with an
   incompatible `ordered` clause; the dg-error comments name the expected
   diagnostics.  */
void
f2 (int *a)
{
  int i;
#pragma omp teams
#pragma omp distribute order(concurrent) /* { dg-error "'order' is not valid for '#pragma omp distribute'" } */
  for (i = 0; i < 128; i++)
    a[i]++;
#pragma omp taskloop order (concurrent) /* { dg-error "'order' is not valid for '#pragma omp taskloop'" } */
  for (i = 0; i < 128; i++)
    a[i]++;
#pragma omp for order(concurrent) ordered /* { dg-error "'order' clause must not be used together with 'ordered'" } */
  for (i = 0; i < 128; i++)
    {
#pragma omp ordered
      a[i]++;
    }
#pragma omp for ordered order(concurrent) /* { dg-error "'order' clause must not be used together with 'ordered'" } */
  for (i = 0; i < 128; i++)
    {
#pragma omp ordered
      a[i]++;
    }
#pragma omp for ordered (1) order(concurrent) /* { dg-error "'order' clause must not be used together with 'ordered'" } */
  for (i = 0; i < 128; i++)
    {
#pragma omp ordered depend (sink: i - 1)
#pragma omp ordered depend (source)
    }
#pragma omp for order(concurrent)ordered (1) /* { dg-error "'order' clause must not be used together with 'ordered'" } */
  for (i = 0; i < 128; i++)
    {
#pragma omp ordered depend (sink: i - 1)
#pragma omp ordered depend (source)
    }
}
|
pr58756.c | /* PR libgomp/58756 */
/* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
extern void abort (void);
int d[32 * 32];
/* Sum d[row + col] over rows starting at multiples of 32 below `a`,
   columns below `b`, with a nested parallel-for / simd reduction.  */
__attribute__((noinline, noclone)) int
foo (int a, int b)
{
  int row, acc = 0;
#pragma omp parallel for reduction(+: acc)
  for (row = 0; row < a; row += 32)
    {
      int col;
#pragma omp simd reduction(+: acc) safelen(1)
      for (col = 0; col < b; col++)
	acc += d[row + col];
    }
  return acc;
}
/* Sum the first `a` elements of d using a combined parallel-for-simd
   reduction.  */
__attribute__((noinline, noclone)) int
bar (int a)
{
  int idx, total = 0;
#pragma omp parallel for simd reduction(+: total) safelen(1)
  for (idx = 0; idx < a; idx++)
    total += d[idx];
  return total;
}
/* Sum the first `a` elements of d with a plain simd reduction.  */
__attribute__((noinline)) static int
baz (int a)
{
  int total = 0, idx;
#pragma omp simd reduction(+: total) safelen(1)
  for (idx = 0; idx < a; idx++)
    total += d[idx];
  return total;
}
/* Fill d with a repeating 0..31 ramp and verify that all three reduction
   variants agree with the closed-form sum.  */
int
main ()
{
  const int expected = (31 * 32 / 2) * 32;
  int i;
  for (i = 0; i < 32 * 32; i++)
    d[i] = (i & 31);
  if (foo (32 * 32, 32) != expected)
    abort ();
  if (bar (32 * 32) != expected)
    abort ();
  if (baz (32 * 32) != expected)
    abort ();
  return 0;
}
|
naive_math_impl.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
// Repack a row-major M x K matrix (row stride `ldin`) into C4 layout: rows
// are grouped 4 at a time and interleaved column by column. Rows past M read
// from a shared zero buffer; when `pack_k` is true, columns are additionally
// zero-padded up to a multiple of 4.
// NOTE(review): zeroing via memset assumes `type` is trivially copyable.
template <typename type>
static void basic_trans_mat_to_c4(const type* input,
                                  type* output,
                                  const int ldin,
                                  const int M,
                                  const int K,
                                  bool pack_k) {
  const int m_round = (M + 3) / 4 * 4;  // rows rounded up to a multiple of 4
  int k_round = (K + 3) / 4 * 4;
  if (!pack_k) {
    k_round = K;  // no column padding requested
  }
  const int m_loop = m_round / 4;
  type* zero_buf = new type[K];
  memset(zero_buf, 0, K * sizeof(type));
  for (int i = 0; i < m_loop; ++i) {
    const type* in0 = input + i * 4 * ldin;
    const type* in1 = in0 + ldin;
    const type* in2 = in1 + ldin;
    const type* in3 = in2 + ldin;
    // For the final group, redirect out-of-range rows to the zero buffer.
    // The fallthroughs are intentional: a deficit of 3 zeroes in1..in3, etc.
    if (4 * (i + 1) - M > 0) {
      switch (4 * (i + 1) - M) {
        case 3:
          in1 = zero_buf;  // fall through
        case 2:
          in2 = zero_buf;  // fall through
        case 1:
          in3 = zero_buf;
        default:
          break;
      }
    }
    // Interleave one element from each of the 4 rows per column.
    for (int j = 0; j < K; ++j) {
      *output++ = *in0++;
      *output++ = *in1++;
      *output++ = *in2++;
      *output++ = *in3++;
    }
    // Column padding (non-empty only when pack_k rounded k_round up).
    for (int j = K; j < k_round; ++j) {
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
    }
  }
  delete[] zero_buf;
}
// Repack a row-major M x K matrix (row stride `ldin`) into C8 layout: rows
// are grouped 8 at a time and interleaved column by column. Rows past M read
// from a shared zero buffer; when `pack_k` is true, columns are additionally
// zero-padded up to a multiple of 8.
// NOTE(review): zeroing via memset assumes `type` is trivially copyable.
template <typename type>
static void basic_trans_mat_to_c8(const type* input,
                                  type* output,
                                  const int ldin,
                                  const int M,
                                  const int K,
                                  bool pack_k) {
  const int m_round = (M + 7) / 8 * 8;  // rows rounded up to a multiple of 8
  int k_round = (K + 7) / 8 * 8;
  if (!pack_k) {
    k_round = K;  // no column padding requested
  }
  const int m_loop = m_round / 8;
  // Heap-allocate the zero row: the previous `type zero_buf[K]` was a VLA,
  // which is not standard C++ and can overflow the stack for large K. This
  // also matches the c4 variant, which already uses the heap.
  type* zero_buf = new type[K];
  memset(zero_buf, 0, K * sizeof(type));
  for (int i = 0; i < m_loop; ++i) {
    const type* in0 = input + i * 8 * ldin;
    const type* in1 = in0 + ldin;
    const type* in2 = in1 + ldin;
    const type* in3 = in2 + ldin;
    const type* in4 = in3 + ldin;
    const type* in5 = in4 + ldin;
    const type* in6 = in5 + ldin;
    const type* in7 = in6 + ldin;
    // For the final group, redirect out-of-range rows to the zero buffer.
    // The fallthroughs are intentional: a deficit of 7 zeroes in1..in7, etc.
    if (8 * (i + 1) - M > 0) {
      switch (8 * (i + 1) - M) {
        case 7:
          in1 = zero_buf;  // fall through
        case 6:
          in2 = zero_buf;  // fall through
        case 5:
          in3 = zero_buf;  // fall through
        case 4:
          in4 = zero_buf;  // fall through
        case 3:
          in5 = zero_buf;  // fall through
        case 2:
          in6 = zero_buf;  // fall through
        case 1:
          in7 = zero_buf;
        default:
          break;
      }
    }
    // Interleave one element from each of the 8 rows per column.
    for (int j = 0; j < K; ++j) {
      *output++ = *in0++;
      *output++ = *in1++;
      *output++ = *in2++;
      *output++ = *in3++;
      *output++ = *in4++;
      *output++ = *in5++;
      *output++ = *in6++;
      *output++ = *in7++;
    }
    // Column padding (non-empty only when pack_k rounded k_round up).
    for (int j = K; j < k_round; ++j) {
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
    }
  }
  delete[] zero_buf;
}
// Reference GEMM with C4-packed output: compute
// tmp = act(alpha * op(A) * op(B) + beta * tmp + bias) into a zero-initialized
// row-major m x ldc temporary, then repack it into `c` via
// basic_trans_mat_to_c4. Because the temporary starts at zero, the beta term
// contributes nothing here.
template <typename type, typename type2>
static void basic_gemm_c4(bool trans_a,
                          bool trans_b,
                          int m,
                          int n,
                          int k,
                          type2 alpha,
                          const type* a,
                          int lda,
                          const type* b,
                          int ldb,
                          type2 beta,
                          type2* c,
                          int ldc,
                          const type2* bias,
                          bool flag_bias = false,
                          bool flag_relu = false) {
  type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
  if (tmp_c == nullptr) {
    // Allocation failure previously went unchecked and crashed in memset;
    // leave `c` untouched instead.
    return;
  }
  memset(tmp_c, 0, m * ldc * sizeof(type2));
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    for (int j = 0; j < n; ++j) {
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        // op(A): trans_a reads A as k x m (column i), else m x k (row i).
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
      if (flag_relu) {
        tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
      } else {
        tmp_c[i * ldc + j] = tmp;
      }
    }
  }
  //! trans c to c4
  basic_trans_mat_to_c4(tmp_c, c, ldc, m, n, false);
  free(tmp_c);
}
// Reference GEMM with C8-packed output: compute
// tmp = act(alpha * op(A) * op(B) + beta * tmp + bias) into a zero-initialized
// row-major m x ldc temporary, then repack it into `c` via
// basic_trans_mat_to_c8. Because the temporary starts at zero, the beta term
// contributes nothing here.
template <typename type, typename type2>
static void basic_gemm_c8(bool trans_a,
                          bool trans_b,
                          int m,
                          int n,
                          int k,
                          type2 alpha,
                          const type* a,
                          int lda,
                          const type* b,
                          int ldb,
                          type2 beta,
                          type2* c,
                          int ldc,
                          const type2* bias,
                          bool flag_bias = false,
                          bool flag_relu = false) {
  type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
  if (tmp_c == nullptr) {
    // Allocation failure previously went unchecked and crashed in memset;
    // leave `c` untouched instead.
    return;
  }
  memset(tmp_c, 0, m * ldc * sizeof(type2));
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    for (int j = 0; j < n; ++j) {
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        // op(A): trans_a reads A as k x m (column i), else m x k (row i).
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
      if (flag_relu) {
        tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
      } else {
        tmp_c[i * ldc + j] = tmp;
      }
    }
  }
  //! trans c to c8 (comment fixed: previously said "c4")
  basic_trans_mat_to_c8(tmp_c, c, ldc, m, n, false);
  free(tmp_c);
}
// Reference GEMM: C = act(alpha * op(A) * op(B) + beta * C + bias).
// op() transposes when the corresponding trans flag is set; A is m x k,
// B is k x n, C is m x n with leading dimensions lda/ldb/ldc. A per-row
// bias is added when flag_bias, and negatives are clamped to zero when
// flag_relu.
template <typename type, typename type2>
static void basic_gemm(bool trans_a,
                       bool trans_b,
                       int m,
                       int n,
                       int k,
                       type2 alpha,
                       const type* a,
                       int lda,
                       const type* b,
                       int ldb,
                       type2 beta,
                       type2* c,
                       int ldc,
                       const type2* bias,
                       bool flag_bias = false,
                       bool flag_relu = false) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    const type2 row_bias = flag_bias ? bias[i] : static_cast<type2>(0);
    for (int j = 0; j < n; ++j) {
      type2 acc = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        const type av = trans_a ? a[l * lda + i] : a[i * lda + l];
        const type bv = trans_b ? b[j * ldb + l] : b[l * ldb + j];
        acc += av * bv;
      }
      const type2 out = alpha * acc + beta * c[i * ldc + j] + row_bias;
      if (flag_relu) {
        c[i * ldc + j] = out > (type2)0 ? out : (type2)0;
      } else {
        c[i * ldc + j] = out;
      }
    }
  }
}
// Reference GEMV: c = act(alpha * op(A) * b + beta * c + bias).
// op(A) reads A as k x m (column i) when trans_a, else as m x k (row i).
// flag_act: 0 = none, 1 = relu, 2 = relu6 (clipped at `six`),
// 4 = leaky relu (negatives scaled by `leakey_relu_alpha`). Any other
// positive value leaves c[i] unwritten — presumably unreachable; confirm
// against callers.
template <typename type, typename type2>
static void basic_gemv(int m,
                       int k,
                       const type* a,
                       const type* b,
                       const type2* bias,
                       type2* c,
                       type2 alpha,
                       type2 beta,
                       bool trans_a = false,
                       bool flag_bias = false,
                       int flag_act = 0,  // was `= false`: bool literal for an int parameter (same value)
                       float six = 6.f,
                       float leakey_relu_alpha = 1.f) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    auto sum = static_cast<type2>(0);
    for (int j = 0; j < k; ++j) {
      const type av = trans_a ? a[j * m + i] : a[i * k + j];
      sum += av * b[j];
    }
    type2 tmp = alpha * sum + beta * c[i] + bias_data;
    if (flag_act > 0) {
      if (flag_act == 1) {  // relu
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
      } else if (flag_act == 2) {  // relu6: clamp to [0, six]
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
        c[i] = c[i] < six ? c[i] : six;  // ut compute
      } else if (flag_act == 4) {  // leakey relu
        c[i] = tmp < (type2)0 ? (type2)(tmp * leakey_relu_alpha) : tmp;
      }
    } else {
      c[i] = tmp;
    }
  }
}
/**
 * \brief basic direct convolution function
 *
 * Naive reference implementation of grouped 2-D convolution used to check
 * optimized kernels. din/dout are NCHW; weights are laid out as
 * [group][out_c_group][in_c_group][kernel_h][kernel_w].
 *
 * act_type: 0 = none, 1 = relu, 2 = relu6 (clipped at `six`),
 * 4 = leaky relu (negatives scaled by `scale`); any other positive value
 * only prints an error message and leaves the raw accumulation in place.
 */
//! for float, dtype1 and type2 is float
//! for int8, dytpe1 is char, dtype2 is int
template <typename Dtype1, typename Dtype2>
static void conv_basic(const Dtype1* din,
                       Dtype2* dout,
                       int num,
                       int chout,
                       int hout,
                       int wout,
                       int chin,
                       int hin,
                       int win,
                       const Dtype1* weights,
                       const Dtype2* bias,
                       int group,
                       int kernel_w,
                       int kernel_h,
                       int stride_w,
                       int stride_h,
                       int dila_w,
                       int dila_h,
                       int pad_w,
                       int pad_h,
                       bool flag_bias,
                       int act_type,
                       float six = 6.f,
                       float scale = 1.f) {
  // beta is always 0, so `dst_data_ref[out_idx] * beta` below zeroes the
  // previous output contents: each element starts from the bias alone.
  Dtype2 beta = 0;
  auto src_data = din;
  auto dst_data_ref = dout;
  auto weights_data = weights;
  auto with_bias = flag_bias;
  auto bias_data = bias;
  int in_num = num;
  int out_channels = chout;
  int out_h = hout;
  int out_w = wout;
  int in_channel = chin;
  int in_h = hin;
  int in_w = win;
  int out_c_group = out_channels / group;
  int in_c_group = in_channel / group;
  for (int n = 0; n < in_num; ++n) {
#ifdef PADDLE_WITH_MKLML
// Each (g, oc, oh, ow) output element is independent, so the four loops
// can be collapsed and parallelized.
#pragma omp parallel for collapse(4)
#endif
    for (int g = 0; g < group; ++g) {
      for (int oc = 0; oc < out_c_group; ++oc) {
        for (int oh = 0; oh < out_h; ++oh) {
          for (int ow = 0; ow < out_w; ++ow) {
            int out_idx = n * group * out_c_group * out_h * out_w +
                          g * out_c_group * out_h * out_w + oc * out_h * out_w +
                          oh * out_w + ow;
            Dtype2 bias_d = with_bias ? (bias_data[g * out_c_group + oc]) : 0;
            dst_data_ref[out_idx] = bias_d + dst_data_ref[out_idx] * beta;
            // Accumulate over this group's input channels and kernel window.
            for (int ic = 0; ic < in_c_group; ++ic) {
              for (int kh = 0; kh < kernel_h; ++kh) {
                for (int kw = 0; kw < kernel_w; ++kw) {
                  int iw = ow * stride_w - pad_w + kw * (dila_w);
                  int ih = oh * stride_h - pad_h + kh * (dila_h);
                  // Taps landing in the implicit zero padding are skipped.
                  if (iw < 0 || iw >= in_w) continue;
                  if (ih < 0 || ih >= in_h) continue;
                  int iidx = n * in_channel * in_h * in_w +
                             g * in_c_group * in_h * in_w + ic * in_h * in_w +
                             ih * in_w + iw;
                  int widx =
                      g * out_c_group * in_c_group * kernel_h * kernel_w +
                      oc * in_c_group * kernel_h * kernel_w +
                      ic * kernel_h * kernel_w + kh * kernel_w + kw;
                  dst_data_ref[out_idx] += src_data[iidx] * weights_data[widx];
                }
              }
            }
            if (act_type > 0) {
              // 1-relu 2-relu6 4-leakyrelu
              if (act_type == 1) {
                dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)0;
              } else if (act_type == 2) {
                dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)0;
                dst_data_ref[out_idx] = dst_data_ref[out_idx] < (Dtype2)six
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)six;
              } else if (act_type == 4) {
                dst_data_ref[out_idx] =
                    dst_data_ref[out_idx] > (Dtype2)0
                        ? dst_data_ref[out_idx]
                        : (Dtype2)(dst_data_ref[out_idx] * scale);
              } else {
                printf("this act type: %d does not support \n", act_type);
              }
            }
          }
        }
      }
    }
  }
}
// Add a per-channel bias to `tensor` (channel-major layout, `channel_size`
// elements per channel) and, when flag_relu, clamp negatives to zero.
template <typename Dtype>
static void fill_bias_relu(Dtype* tensor,
                           const Dtype* bias,
                           int channel,
                           int channel_size,
                           bool flag_bias,
                           bool flag_relu) {
  Dtype* data = tensor;
  for (int j = 0; j < channel; ++j) {
    Dtype bias_c = flag_bias ? bias[j] : 0;
    for (int i = 0; i < channel_size; i++) {
      data[i] += bias_c;
      if (flag_relu) {
        // Clamp with a Dtype zero. The previous `0.f` literal forced the
        // ternary through float, which loses precision for integral Dtype
        // values above 2^24.
        data[i] = data[i] > static_cast<Dtype>(0) ? data[i]
                                                  : static_cast<Dtype>(0);
      }
    }
    data += channel_size;
  }
}
// In-place ReLU: replace every negative element of `tensor` with Dtype(0).
template <typename Dtype>
static void do_relu(Dtype* tensor, int size) {
  for (int idx = 0; idx < size; idx++) {
    if (tensor[idx] < (Dtype)0) {
      tensor[idx] = (Dtype)0;
    }
  }
}
// True iff 0 <= a < b, done with a single unsigned comparison: a negative
// `a` wraps to a huge unsigned value and fails the `< b` test.
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
  const unsigned ua = static_cast<unsigned>(a);
  const unsigned ub = static_cast<unsigned>(b);
  return ua < ub;
}
// Inverse of im2col: scatter-add the column buffer `data_col` (laid out as
// [channel][kernel_row][kernel_col][output_row][output_col]) back into the
// image `data_im` (CHW, zero-filled here first). Overlapping kernel windows
// accumulate, which is why elements are added rather than assigned.
// NOTE(review): the memset zero-fill assumes Dtype is trivially copyable
// with an all-zero-bits zero value.
template <typename Dtype>
static void col2im(const Dtype* data_col,
                   const int channels,
                   const int height,
                   const int width,
                   const int kernel_h,
                   const int kernel_w,
                   const int pad_h0,
                   const int pad_h1,
                   const int pad_w0,
                   const int pad_w1,
                   const int stride_h,
                   const int stride_w,
                   const int dilation_h,
                   const int dilation_w,
                   Dtype* data_im) {
  memset(data_im, 0, height * width * channels * sizeof(Dtype));
  // Output spatial size implied by the convolution geometry.
  const int output_h =
      (height + pad_h0 + pad_h1 - (dilation_h * (kernel_h - 1) + 1)) /
          stride_h +
      1;
  const int output_w =
      (width + pad_w0 + pad_w1 - (dilation_w * (kernel_w - 1) + 1)) / stride_w +
      1;
  const int channel_size = height * width;
  // data_col is consumed strictly sequentially; rows that fall entirely in
  // the padding are skipped by advancing the pointer by a whole output row.
  for (int channel = channels; channel--; data_im += channel_size) {
    for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
      for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
        int input_row = -pad_h0 + kernel_row * dilation_h;
        for (int output_rows = output_h; output_rows; output_rows--) {
          if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
            data_col += output_w;
          } else {
            int input_col = -pad_w0 + kernel_col * dilation_w;
            for (int output_col = output_w; output_col; output_col--) {
              if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                data_im[input_row * width + input_col] += *data_col;
              }
              data_col++;
              input_col += stride_w;
            }
          }
          input_row += stride_h;
        }
      }
    }
  }
}
//! for float, dtype1 and type2 is float
//! for int8, dytpe1 is char, dtype2 is int
// Reference transposed convolution (deconvolution) as GEMM + col2im:
// per group, columns = W^T * input (an m x n GEMM), then the columns are
// scatter-added back into the output image. For the 1x1/stride-1/no-pad/
// no-dilation case the column buffer IS the output, so col2im is skipped.
template <typename Dtype1, typename Dtype2>
void deconv_basic(const Dtype1* din,
                  Dtype2* dout,
                  int num,
                  int chout,
                  int hout,
                  int wout,
                  int chin,
                  int hin,
                  int win,
                  const Dtype1* weights,
                  const Dtype2* bias,
                  int group,
                  int kernel_w,
                  int kernel_h,
                  int stride_w,
                  int stride_h,
                  int dila_w,
                  int dila_h,
                  int pad_w0,
                  int pad_w1,
                  int pad_h0,
                  int pad_h1,
                  bool flag_bias,
                  bool flag_relu) {
  int m = chout * kernel_w * kernel_h / group;
  int n = hin * win;
  int k = chin / group;
  int group_size_in = win * hin * chin / group;
  int group_size_coldata = m * n;
  int group_size_weights = chin * chout * kernel_w * kernel_h / (group * group);
  bool flag_1x1s1p1 = (kernel_w == 1) && (kernel_h == 1) && (stride_h == 1) &&
                      (stride_w == 1) && (pad_w0 == 0) && (pad_h0 == 0) &&
                      (pad_w1 == 0) && (pad_h1 == 0) && (dila_w == 1) &&
                      (dila_h == 1);
  // Column workspace for one batch, reused across batches.
  // Fixed: previously sized with sizeof(float), which under-allocates
  // whenever sizeof(Dtype2) > sizeof(float) (e.g. Dtype2 == double),
  // causing a heap overflow in the memset/GEMM below.
  Dtype2* workspace_ptr =
      static_cast<Dtype2*>(malloc(sizeof(Dtype2) * m * n * group));
  if (workspace_ptr == nullptr) {
    return;  // allocation failed; previously this dereferenced null
  }
  for (int i = 0; i < num; ++i) {
    const Dtype1* din_batch = din + i * chin * hin * win;
    Dtype2* dout_batch = dout + i * chout * hout * wout;
    Dtype2* col_data = workspace_ptr;
    if (flag_1x1s1p1) {
      col_data = dout_batch;
    }
    memset(col_data, 0, sizeof(Dtype2) * group_size_coldata * group);
    for (int g = 0; g < group; ++g) {
      const Dtype1* din_group = din_batch + g * group_size_in;
      const Dtype1* weights_group = weights + g * group_size_weights;
      Dtype2* coldata_group = col_data + g * group_size_coldata;
      // ReLU is fused into the GEMM only when there is no bias; with a bias
      // it is applied after the bias add by fill_bias_relu below.
      // NOTE(review): in the non-1x1 path the fused relu runs before the
      // col2im accumulation — confirm this is intended.
      basic_gemm<Dtype1, Dtype2>(true,
                                 false,
                                 m,
                                 n,
                                 k,
                                 1,
                                 weights_group,
                                 m,
                                 din_group,
                                 n,
                                 0,
                                 coldata_group,
                                 n,
                                 nullptr,
                                 false,
                                 (!flag_bias && flag_relu));
    }
    if (!flag_1x1s1p1) {
      col2im(col_data,
             chout,
             hout,
             wout,
             kernel_h,
             kernel_w,
             pad_h0,
             pad_h1,
             pad_w0,
             pad_w1,
             stride_h,
             stride_w,
             dila_h,
             dila_w,
             dout_batch);
    }
    //! add bias
    if (flag_bias) {
      fill_bias_relu(
          dout_batch, bias, chout, wout * hout, flag_bias, flag_relu);
    }
  }
  free(workspace_ptr);
}
// Bilinear sampling of a single-channel, row-major image at fractional
// coordinates (h, w). `data_width` is the row stride; `height`/`width` bound
// the sampled region, and coordinates at or beyond the last row/column are
// clamped to it. Marked inline: a non-template, non-static function defined
// in a header otherwise causes multiple-definition (ODR) errors when the
// header is included from several translation units.
// NOTE(review): negative h or w produces negative indices — callers appear
// to guard against this; confirm.
inline float deformable_bilinear(const float* bottom_data,
                                 const int data_width,
                                 const int height,
                                 const int width,
                                 float h,
                                 float w) {
  int h_low = static_cast<int>(floor(h));
  int w_low = static_cast<int>(floor(w));
  int h_high;
  int w_high;
  if (h_low >= height - 1) {
    // Clamp to the last row; the interpolation degenerates in h.
    h_high = h_low = height - 1;
    h = static_cast<float>(h_low);
  } else {
    h_high = h_low + 1;
  }
  if (w_low >= width - 1) {
    // Clamp to the last column; the interpolation degenerates in w.
    w_high = w_low = width - 1;
    w = static_cast<float>(w_low);
  } else {
    w_high = w_low + 1;
  }
  // Fractional offsets within the cell and their complements.
  float lh = h - h_low;
  float lw = w - w_low;
  float hh = 1 - lh;
  float hw = 1 - lw;
  // Four corner samples weighted by the opposite-corner areas.
  float v1 = bottom_data[h_low * data_width + w_low];
  float v2 = bottom_data[h_low * data_width + w_high];
  float v3 = bottom_data[h_high * data_width + w_low];
  float v4 = bottom_data[h_high * data_width + w_high];
  float w1 = hh * hw;
  float w2 = hh * lw;
  float w3 = lh * hw;
  float w4 = lh * lw;
  float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
//! Naive reference implementation of (modulated) deformable convolution.
//! For float, Dtype1 and Dtype2 are both float; for int8, Dtype1 is char
//! and Dtype2 is int.
//!
//! Layouts (NCHW): in_data [num, chin, hin, win]; out_data
//! [num, chout, hout, wout]; offset_data [num, group*2*kernel_h*kernel_w,
//! hout, wout] with (h, w) offset pairs interleaved per tap; mask_data
//! [num, group*kernel_h*kernel_w, hout, wout], used only when `modulated`.
//! NOTE(review): out_data is accumulated into, so callers must pass a
//! zero-initialized output buffer.
template <typename Dtype1, typename Dtype2>
void deformable_conv_basic(const Dtype1* in_data,
                           const float* offset_data,
                           const float* mask_data,
                           Dtype2* out_data,
                           int num,
                           int chout,
                           int hout,
                           int wout,
                           int chin,
                           int hin,
                           int win,
                           const Dtype1* weights,
                           const Dtype2* bias,
                           int group,
                           int kernel_w,
                           int kernel_h,
                           int stride_w,
                           int stride_h,
                           int dila_w,
                           int dila_h,
                           int pad_w,
                           int pad_h,
                           bool flag_bias,
                           bool flag_relu,
                           bool modulated) {
  int out_c_group = chout / group;
  int in_c_group = chin / group;
  int in_size = hin * win;
  int out_size = hout * wout;
  int c_in_size = chin * in_size;
  int c_out_size = chout * out_size;
  int kernel_size = kernel_w * kernel_h;
  for (int n = 0; n < num; n++) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
    for (int g = 0; g < group; ++g) {
      for (int oc = 0; oc < out_c_group; ++oc) {
        for (int oh = 0; oh < hout; oh++) {
          for (int ow = 0; ow < wout; ow++) {
            int out_idx = n * c_out_size + g * out_c_group * out_size +
                          oc * out_size + oh * wout + ow;
            Dtype2 bias_d = flag_bias ? bias[g * out_c_group + oc] : 0;
            out_data[out_idx] = bias_d + out_data[out_idx];
            for (int ic = 0; ic < in_c_group; ++ic) {
              for (int fh = 0; fh < kernel_h; fh++) {
                for (int fw = 0; fw < kernel_w; fw++) {
                  // Learned offsets for this group: 2 floats (h, w) per
                  // kernel tap per output position.
                  const float* offset_data_ptr =
                      offset_data + n * group * 2 * kernel_size * out_size +
                      g * 2 * kernel_size * out_size;
                  const int data_offset_h_ptr =
                      ((2 * (fh * kernel_w + fw)) * hout + oh) * wout + ow;
                  const int data_offset_w_ptr =
                      ((2 * (fh * kernel_w + fw) + 1) * hout + oh) * wout + ow;
                  const float offset_h = offset_data_ptr[data_offset_h_ptr];
                  const float offset_w = offset_data_ptr[data_offset_w_ptr];
                  // Sampling location = output anchor + tap displacement
                  // (tap index scaled by dilation) + learned offset.
                  // BUGFIX: the displacement uses the tap indices (fh, fw),
                  // not kernel_h/kernel_w, which would make every tap of
                  // the kernel sample the same point.
                  const float iw =
                      ow * stride_w - pad_w + fw * dila_w + offset_w;
                  const float ih =
                      oh * stride_h - pad_h + fh * dila_h + offset_h;
                  if (ih >= 0 && ih < hin && iw >= 0 && iw < win) {
                    // Same location expressed relative to the window
                    // anchored at (oh*stride_h - pad_h, ow*stride_w - pad_w).
                    const float map_h = fh * dila_h + offset_h;
                    const float map_w = fw * dila_w + offset_w;
                    const int cur_height = hin - (oh * stride_h - pad_h);
                    const int cur_width = win - (ow * stride_w - pad_w);
                    const float* in_data_offset =
                        in_data + n * c_in_size +
                        (g * in_c_group + ic) * in_size +
                        (oh * stride_h - pad_h) * win + (ow * stride_w - pad_w);
                    float val = deformable_bilinear(in_data_offset,
                                                    win,
                                                    cur_height,
                                                    cur_width,
                                                    map_h,
                                                    map_w);
                    if (modulated) {
                      // Deformable conv v2: scale the sample by its
                      // learned modulation mask.
                      const float* mask_ptr =
                          mask_data + n * group * kernel_size * out_size +
                          g * kernel_size * out_size +
                          (fh * kernel_w + fw) * hout * wout + oh * wout + ow;
                      val *= mask_ptr[0];
                    }
                    int widx = g * out_c_group * in_c_group * kernel_size +
                               oc * in_c_group * kernel_size +
                               ic * kernel_size + fh * kernel_w + fw;
                    out_data[out_idx] += val * weights[widx];
                  }
                }
              }
            }
            if (flag_relu) {
              out_data[out_idx] = out_data[out_idx] > 0 ? out_data[out_idx] : 0;
            }
          }
        }
      }
    }
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.